source
stringlengths
3
92
c
stringlengths
26
2.25M
ast-dump-openmp-teams-distribute-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target #pragma omp teams distribute parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:47> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:47> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:47> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | 
| |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 
'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:6:8, 
col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:5:1, col:47> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} 
<line:7:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} 
<col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:47> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:47> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:47> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt 
{{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 
'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | 
|-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' 
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:12:1, col:47> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, 
col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr 
{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue 
Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:59> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:59> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | | |-value: Int 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1 // CHECK-NEXT: | | | | | 
`-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | 
|-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' 
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:20:1, col:59> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | |-value: Int 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | 
| | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr 
{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue 
Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:59> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:59> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | | |-value: Int 2 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | | | 
`-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | 
|-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' 
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:28:1, col:59> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | |-value: Int 2 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | 
| | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr 
{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue 
Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} 
<col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1> // CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19> // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:59> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:59> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:1, col:59> // CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | |-value: Int 2 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> 
// CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | 
|-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | 
| | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> 
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator 
{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit 
__forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:36:1, col:59> // CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | |-value: Int 2 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid 
sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // 
CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | 
| | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // 
CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | 
|-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> 
col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
tentusscher_epi_2004_S2_2.c
#include <assert.h>
#include <stdlib.h>
#include "tentusscher_epi_2004_S2_2.h"

// NOTE(review): this file calls exp/log/sqrt/pow but does not include
// <math.h> directly -- presumably the model header pulls it in; verify.

// Report static model data to the caller: the resting membrane potential
// (INITIAL_V) and/or the number of ODE state variables (NEQ), as selected
// by the get_initial_v / get_neq flags supplied by the macro signature.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Initialize one cell's NEQ-entry state vector `sv`.
// The textbook default initial conditions are kept below (commented out)
// for reference; the active code loads a pre-paced steady-state vector
// ("Elnaz's" S2 conditions) instead, so simulations start at equilibrium.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */

    // Elnaz's steady-state initial conditions
    // Order matches the state layout documented above: V, M, H, J, Xr1, Xr2,
    // Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki.
    real sv_sst[]={-86.5236591284772,0.00130241284471985,0.778613483022969,0.778472769811598,0.000175875277625194,0.484626058693879,0.00294965177778795,0.999998333317616,1.94791112184908e-08,1.90234417053386e-05,0.999779558473224,1.00713872511970,0.999995965310622,4.41551215458988e-05,0.567040008888733,10.2464162625462,139.303734550690};

    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance every requested cell by `num_steps` time steps of size `dt`.
// `cells_to_solve` (optional) maps loop index -> cell id; when NULL, cell
// ids are assumed to be the loop indices themselves. Cells are independent,
// so the outer loop is parallelized; `sv_id` is thread-private.
// NOTE(review): `i` is a signed int compared against `num_cells_to_solve`
// (a uint32_t per the macro) -- relies on the cell count fitting in int.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            // Each cell occupies NEQ consecutive entries of the global sv array.
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// Take one explicit time step of length `dt` for a single cell.
// RHS_cpu returns the *new* state values (not time derivatives) in rDY,
// so the state vector is simply overwritten with rDY afterwards.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    // Snapshot the current state so RHS_cpu reads a consistent input.
    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Right-hand side of the ten Tusscher 2004 epicardial ventricular model.
// Despite the name, this routine returns the NEW state after one step of
// size `dt` in rDY_: gating variables are advanced with the exponential
// (Rush-Larsen-style) update x_new = x_inf - (x_inf - x)*exp(-dt/tau),
// while the concentrations (Cai, CaSR, Nai, Ki) and the membrane voltage
// are advanced with forward Euler inside this function.
//   sv           - input state, 17 entries (layout as in the initializer)
//   rDY_         - output: updated state, 17 entries
//   stim_current - external stimulus current added to the total current
//   dt           - time step (also baked into exptaufca/exptaug below)
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables (unpacked for readability)
    real svolt = sv[0];     // membrane voltage
    real sm = sv[1];        // INa activation gate
    real sh = sv[2];        // INa fast inactivation gate
    real sj = sv[3];        // INa slow inactivation gate
    real sxr1 = sv[4];      // IKr activation gate
    real sxr2 = sv[5];      // IKr inactivation gate
    real sxs = sv[6];       // IKs gate
    real ss = sv[7];        // Ito inactivation gate
    real sr = sv[8];        // Ito activation gate
    real sd = sv[9];        // ICaL activation gate
    real sf = sv[10];       // ICaL voltage inactivation gate
    real sfca = sv[11];     // ICaL calcium inactivation gate
    real sg = sv[12];       // CICR release gate
    real Cai = sv[13];      // intracellular Ca2+
    real CaSR = sv[14];     // SR Ca2+
    real Nai = sv[15];      // intracellular Na+
    real Ki = sv[16];       // intracellular K+

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductance/kinetics overrides: these replace the book values
    // declared above (so the declarations are effectively only defaults).
    // Order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca,
    // Vmaxup, GpCa, arel, crel, Vleak.
    real parameters []={14.2751110459407,0.000197490405913840,0.000138093676576538,0.000459611951400222,0.248312214169369,0.146550920650185,0.141336894566835,4.51002424199619,0.0147942147525980,1.60874334855823,1098.91591518736,0.000497071049372500,0.357179450926053,0.0190817376935230,0.00515881032161095,3.63348608264117e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Membrane currents
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    // Concentration increments and calcium-handling scratch variables
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //  real BufferFactorc;
    //  real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;

    // Reversal potentials and rectification helpers
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;

    // Gate rate constants, steady states and time constants
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    // Precomputed factors (1/(2*Vc*F) converts current to concentration rate)
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //  real BufcKbufc=Bufc*Kbufc;
    //  real Kbufcsquare=Kbufc*Kbufc;
    //  real Kbufc2=2*Kbufc;
    //  real BufsrKbufsr=Bufsr*Kbufsr;
    //  const real Kbufsrsquare=Kbufsr*Kbufsr;
    //  const real Kbufsr2=2*Kbufsr;
    // Exponential integrators for the fca and g gates use fixed taus, so the
    // exp() can be evaluated once per step.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;     // total transmembrane current incl. stimulus

    //Needed to compute currents: Nernst reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    // Inward-rectifier and pump rectification factors
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    // L-type Ca current (Goldman-Hodgkin-Katz-style driving term)
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations (forward Euler with analytic buffering solve)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    // Ca-induced Ca release, leak and SERCA uptake fluxes
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium: quadratic solve for free Ca given calsequestrin buffering
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium: same quadratic buffering treatment
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate uses different rate formulas above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate likewise switches rate formulas at -40 mV
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));    // Updated from CellML

    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates: exponential (Rush-Larsen) integration toward steady state
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fca and g gates may only decrease while the cell is depolarized
    // (above -37 mV): reject increases by restoring the previous value.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler on the total current)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
GB_unaryop__minv_int8_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int8_fp64
// op(A') function:  GB_tran__minv_int8_fp64

// C type:   int8_t
// A type:   double
// cast:     int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop:  cij = GB_IMINV_SIGNED (aij, 8)

// type of the A matrix entries
#define GB_ATYPE double

// type of the C matrix entries
#define GB_CTYPE int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: signed 8-bit integer multiplicative inverse
// (semantics defined by GB_IMINV_SIGNED in the GraphBLAS headers)
#define GB_OP(z, x) z = GB_IMINV_SIGNED (x, 8) ;

// casting: typecast double to int8_t via GB_CAST_SIGNED
#define GB_CASTING(z, x) int8_t z ; GB_CAST_SIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the unary op to all anz entries of Ax, writing results to Cx.
// Each of the nthreads OpenMP threads handles one static contiguous slice.
GrB_Info GB_unop__minv_int8_fp64
(
    int8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB_tran__minv_int8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
LBLT.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <immintrin.h>

/*
 * Right-boundary binary search over a (sorted, non-decreasing) CSR row
 * pointer array: returns the smallest index `start` such that
 * row_pointer[start] > key_input, i.e. the insertion point just to the
 * right of key_input.
 */
int binary_search_right_boundary_kernel_LBLT(const int *row_pointer, const int key_input, const int size)
{
    int start = 0;
    int stop = size - 1;
    int median;
    int key_median;
    while (stop >= start)
    {
        median = (stop + start) / 2;
        key_median = row_pointer[median];
        if (key_input >= key_median)
            start = median + 1;
        else
            stop = median - 1;
    }
    return start;
}

/*
 * AVX2 sparse dot product: *res = sum over len nonzeros of Val[j] * X[indx[j]].
 * Processes 4 nonzeros per FMA, then reduces the vector accumulator
 * horizontally and finishes the len%4 remainder scalar.
 */
void Dot_Product_Avx2_dLBLT(int len, const int *indx, const double *Val, const double *X, double *res)
{
    const int *colIndPtr = indx;
    const double *matValPtr = (double *) Val;
    const double *x = (double *) X;
    int j;
    double result = 0.0;
    __m256d vec_y;
    vec_y = _mm256_setzero_pd();
    int nnzThisLine = len;
    int k_iter = nnzThisLine / 4;
    int k_rem = nnzThisLine % 4;

    /* Loop in multiples of 4 non-zeroes */
    for (j = 0; j < k_iter; j++)
    {
        vec_y = _mm256_fmadd_pd(
            *((__m256d_u *) (matValPtr)),
            _mm256_set_pd(x[*(colIndPtr + 3)], x[*(colIndPtr + 2)],
                          x[*(colIndPtr + 1)], x[*(colIndPtr)]),
            vec_y);
        matValPtr += 4;
        colIndPtr += 4;
    }

    /* Horizontal addition of the 4 partial sums */
    if (k_iter)
    {
        /* sum[0] += sum[1] ; sum[2] += sum[3] */
        vec_y = _mm256_hadd_pd(vec_y, vec_y);
        /* Cast avx_sum to 128 bit to obtain sum[0] and sum[1] */
        __m128d sum_lo = _mm256_castpd256_pd128(vec_y);
        /* Extract 128 bits to obtain sum[2] and sum[3] */
        __m128d sum_hi = _mm256_extractf128_pd(vec_y, 1);
        /* Add remaining two sums */
        __m128d sse_sum = _mm_add_pd(sum_lo, sum_hi);
        /* Store result */
        result = sse_sum[0];
    }

    /* Remainder loop for nnzThisLine % 4 */
    for (j = 0; j < k_rem; j++)
    {
        result += *matValPtr++ * x[*colIndPtr++];
    }
    *(double *) res = result;
}

/*
 * Load-balanced-by-threads (LBLT) SpMV benchmark.
 *
 * Partitions the nnzR nonzeros of the CSR matrix (RowPtr/ColIdx/Val, m x n)
 * evenly across OpenMP threads, then times `iter` repetitions of a scalar
 * and an AVX2 SpMV kernel against a golden serial result.
 *
 * NOTE: Val[] is overwritten with all-ones before benchmarking (the golden
 * result is computed with the same values, so verification stays valid).
 *
 * Outputs (each a caller-supplied array):
 *   GFlops_LBLT[0..1], Time_LBLT[0..1], LBLT_error[0..1]  — scalar / AVX2
 *   time_pre[2]                                           — preprocessing ms
 * Returns 0.
 */
int spmvLBLT(int m, int n, int nnzR, int *RowPtr, int *ColIdx, double *Val,
             double *GFlops_LBLT, double *Time_LBLT, double *time_pre, double *LBLT_error)
{
    for (int i = 0; i < nnzR; i++)
        Val[i] = 1;

    /* create X, Y, Y_golden */
    double *X = (double *)malloc(sizeof(double) * (n + 1));
    double *Y = (double *)malloc(sizeof(double) * (m + 1));
    double *Y_golden = (double *)malloc(sizeof(double) * (m + 1));
    memset(X, 0, sizeof(double) * (n + 1));
    memset(Y, 0, sizeof(double) * (m + 1));
    memset(Y_golden, 0, sizeof(double) * (m + 1));
    for (int i = 0; i < n; i++)
        X[i] = 1;
    /* serial reference result */
    for (int i = 0; i < m; i++)
        for (int j = RowPtr[i]; j < RowPtr[i + 1]; j++)
            Y_golden[i] += Val[j] * X[ColIdx[j]];

    int nthreads = omp_get_max_threads();
    int iter = 500;
    struct timeval t1, t2, t3;

    gettimeofday(&t1, NULL);
    int *csrSplitter_yid = (int *)malloc((nthreads + 1) * sizeof(int));
    int stridennz = ceil((double)nnzR / (double)nthreads);
    for (int tid = 0; tid <= nthreads; tid++)
    {
        /* compute partition boundaries by partition of size stride */
        int boundary_yid = tid * stridennz;
        /* clamp partition boundaries to [0, nnzR] */
        boundary_yid = boundary_yid > nnzR ? nnzR : boundary_yid;
        /* binary search: first row whose nonzeros start past the boundary */
        csrSplitter_yid[tid] = binary_search_right_boundary_kernel_LBLT(RowPtr, boundary_yid, m + 1) - 1;
    }
    gettimeofday(&t2, NULL);

    /* rows handled by each thread */
    int *Apinter = (int *)malloc(nthreads * sizeof(int));
    memset(Apinter, 0, nthreads * sizeof(int));
    for (int tid = 0; tid < nthreads; tid++)
    {
        Apinter[tid] = csrSplitter_yid[tid + 1] - csrSplitter_yid[tid];
    }

    /* nonzeros handled by each thread */
    int *Bpinter = (int *)malloc(nthreads * sizeof(int));
    memset(Bpinter, 0, nthreads * sizeof(int));
    for (int tid = 0; tid < nthreads; tid++)
    {
        int num = 0;
        for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid + 1]; u++)
        {
            num += RowPtr[u + 1] - RowPtr[u];
        }
        Bpinter[tid] = num;
    }

    /* Yid[tid]: row shared by several threads (-1 when this thread owns
     * whole rows only) */
    int *Yid = (int *)malloc(sizeof(int) * nthreads);
    memset(Yid, 0, sizeof(int) * nthreads);
    int flag = -2;
    for (int tid = 0; tid < nthreads; tid++)
    {
        if (csrSplitter_yid[tid + 1] - csrSplitter_yid[tid] == 0 && tid != 0)
        {
            Yid[tid] = csrSplitter_yid[tid];
            flag = 1;
        }
        else if (flag == 1)
        {
            Yid[tid] = csrSplitter_yid[tid];
            flag = -2;
        }
        else
        {
            Yid[tid] = -1;
        }
    }

    /* row-averaging, used when one thread's rows span several threads */
    int *Start1 = (int *)malloc(sizeof(int) * nthreads);
    memset(Start1, 0, sizeof(int) * nthreads);
    int *End1 = (int *)malloc(sizeof(int) * nthreads);
    memset(End1, 0, sizeof(int) * nthreads);
    int *label = (int *)malloc(sizeof(int) * nthreads);
    memset(label, 0, sizeof(int) * nthreads);
    int start1, search1 = 0;
    for (int tid = 0; tid < nthreads; tid++)
    {
        if (Apinter[tid] == 0)
        {
            if (search1 == 0)
            {
                start1 = tid;
                search1 = 1;
            }
        }
        if (search1 == 1 && Apinter[tid] != 0)
        {
            int nntz = floor((double)Apinter[tid] / (double)(tid - start1 + 1));
            if (nntz != 0)
            {
                for (int i = start1; i <= tid; i++)
                {
                    label[i] = i;
                }
            }
            else if ((tid - start1 + 1) >= Apinter[tid] && Apinter[tid] != 0)
            {
                for (int i = start1; i <= tid; i++)
                {
                    label[i] = i;
                }
            }
            int mntz = Apinter[tid] - (nntz * (tid - start1));
            /* start and end */
            int nrow = start1;
            Start1[nrow] = csrSplitter_yid[tid];
            End1[nrow] = Start1[nrow] + nntz;
            for (int p = start1 + 1; p <= tid; p++)
            {
                if (p == tid)
                {
                    Start1[p] = End1[p - 1];
                    End1[p] = Start1[p] + mntz;  /* last thread takes the remainder */
                }
                else
                {
                    Start1[p] = End1[p - 1];
                    End1[p] = Start1[p] + nntz;
                }
            }
            search1 = 0;
        }
    }

    /* nonzero-averaging, used when #rows < #threads */
    double *Ypartialsum = (double *)malloc(sizeof(double) * nthreads);
    memset(Ypartialsum, 0, sizeof(double) * nthreads);
    double *Ysum = (double *)malloc(sizeof(double) * nthreads);
    memset(Ysum, 0, sizeof(double) * nthreads);
    int *Start2 = (int *)malloc(sizeof(int) * nthreads);
    memset(Start2, 0, sizeof(int) * nthreads);
    int *End2 = (int *)malloc(sizeof(int) * nthreads);
    memset(End2, 0, sizeof(int) * nthreads);
    int start2, search2 = 0;
    for (int tid = 0; tid < nthreads; tid++)
    {
        if (Bpinter[tid] == 0)
        {
            if (search2 == 0)
            {
                start2 = tid;
                search2 = 1;
            }
        }
        if (search2 == 1 && Bpinter[tid] != 0)
        {
            int nntz2 = floor((double)Bpinter[tid] / (double)(tid - start2 + 1));
            int mntz2 = Bpinter[tid] - (nntz2 * (tid - start2));
            /* start and end */
            int nn = start2;
            for (int i = start2; i >= 0; i--)
            {
                Start2[nn] += Bpinter[i];
                End2[nn] = Start2[nn] + nntz2;
            }
            for (nn = start2 + 1; nn < tid; nn++)
            {
                Start2[nn] = End2[nn - 1];
                End2[nn] = Start2[nn] + nntz2;
            }
            if (nn == tid)
            {
                Start2[nn] = End2[nn - 1];
                End2[nn] = Start2[nn] + mntz2;
            }
            search2 = 0;
        }
    }
    gettimeofday(&t3, NULL);

    double time_LBL_pre = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0);
    double time_LBLT_pre = ((t3.tv_sec - t1.tv_sec) * 1000.0 + (t3.tv_usec - t1.tv_usec) / 1000.0);
    for (int tid = 0; tid < nthreads; tid++)
    {
        if (Yid[tid] != -1)
        {
            time_pre[2] = time_LBLT_pre;
        }
        else
        {
            time_pre[2] = time_LBL_pre;
        }
    }

    /*---------------------- parallel_omp_balanced_Yid (scalar) ----------------------*/
    int currentiter = 0;
    gettimeofday(&t1, NULL);
    for (currentiter = 0; currentiter < iter; currentiter++)
    {
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            /* FIX: Yid[tid] == -1 means "no shared row"; the original code
             * unconditionally wrote Y[Yid[tid]], i.e. Y[-1] — out of bounds. */
            if (Yid[tid] != -1)
                Y[Yid[tid]] = 0;
        }
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            if (Yid[tid] == -1)
            {
                for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid + 1]; u++)
                {
                    double sum = 0;
                    for (int j = RowPtr[u]; j < RowPtr[u + 1]; j++)
                    {
                        sum += Val[j] * X[ColIdx[j]];
                    }
                    Y[u] = sum;
                }
            }
            if (label[tid] != 0)
            {
                for (int u = Start1[tid]; u < End1[tid]; u++)
                {
                    double sum = 0;
                    for (int j = RowPtr[u]; j < RowPtr[u + 1]; j++)
                    {
                        sum += Val[j] * X[ColIdx[j]];
                    }
                    Y[u] = sum;
                }
            }
            if (Yid[tid] != -1 && label[tid] == 0)
            {
                Ysum[tid] = 0;
                Ypartialsum[tid] = 0;
                for (int j = Start2[tid]; j < End2[tid]; j++)
                {
                    Ypartialsum[tid] += Val[j] * X[ColIdx[j]];
                }
                Ysum[tid] += Ypartialsum[tid];
                Y[Yid[tid]] += Ysum[tid];
            }
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced2 = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
    double GFlops_balanced2 = 2 * nnzR / time_balanced2 / pow(10, 6);
    int errorcount_balanced2 = 0;
    for (int i = 0; i < m; i++)
        if (Y[i] != Y_golden[i])
            errorcount_balanced2++;
    GFlops_LBLT[0] = GFlops_balanced2;
    Time_LBLT[0] = time_balanced2;
    LBLT_error[0] = errorcount_balanced2;

    /*---------------------- parallel_omp_balanced_avx2_Yid ----------------------*/
    gettimeofday(&t1, NULL);
    for (currentiter = 0; currentiter < iter; currentiter++)
    {
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            /* FIX: same out-of-bounds guard as in the scalar loop above. */
            if (Yid[tid] != -1)
                Y[Yid[tid]] = 0;
        }
        #pragma omp parallel for
        for (int tid = 0; tid < nthreads; tid++)
        {
            if (Yid[tid] == -1)
            {
                for (int u = csrSplitter_yid[tid]; u < csrSplitter_yid[tid + 1]; u++)
                {
                    Dot_Product_Avx2_dLBLT(RowPtr[u + 1] - RowPtr[u],
                                           ColIdx + RowPtr[u], Val, X, Y + u);
                }
            }
            else if (label[tid] != 0)
            {
                for (int u = Start1[tid]; u < End1[tid]; u++)
                {
                    Dot_Product_Avx2_dLBLT(RowPtr[u + 1] - RowPtr[u],
                                           ColIdx + RowPtr[u], Val, X, Y + u);
                }
            }
            if (Yid[tid] != -1 && label[tid] == 0)
            {
                /* NOTE(review): unlike the scalar path, this partial sum is
                 * left in Ysum[tid] and never folded into Y[Yid[tid]], so the
                 * AVX2 error count includes shared rows — confirm intent. */
                Dot_Product_Avx2_dLBLT(End2[tid] - Start2[tid],
                                       ColIdx + Start2[tid], Val + Start2[tid],
                                       X, Ysum + tid);
            }
        }
    }
    gettimeofday(&t2, NULL);
    double time_balanced2_avx = ((t2.tv_sec - t1.tv_sec) * 1000.0 + (t2.tv_usec - t1.tv_usec) / 1000.0) / iter;
    double GFlops_balanced2_avx = 2 * nnzR / time_balanced2_avx / pow(10, 6);
    int errorcount_balanced2_avx = 0;
    for (int i = 0; i < m; i++)
        if (Y[i] != Y_golden[i])
            errorcount_balanced2_avx++;
    GFlops_LBLT[1] = GFlops_balanced2_avx;
    Time_LBLT[1] = time_balanced2_avx;
    LBLT_error[1] = errorcount_balanced2_avx;

    /* FIX: release all scratch buffers (the original leaked every one). */
    free(X);
    free(Y);
    free(Y_golden);
    free(csrSplitter_yid);
    free(Apinter);
    free(Bpinter);
    free(Yid);
    free(Start1);
    free(End1);
    free(label);
    free(Ypartialsum);
    free(Ysum);
    free(Start2);
    free(End2);
    return 0;
}
Calculate_AOs_fit.c
#include "num_of_threads.h"
#include<omp.h>
#include"utils.h"
#include"structs.h"
#include"matrix_ops.h"
#include"globals.h"

void Calculate_AOs_fit(int *tlist,double *vlist,int nfac,int nvert,double *angles,AOstruct *AOs,double *offset,double *D,int dm,int dn,double *Weight,double *scale,double *FT,double *FTdv,double* FTdS,double *Albedo,double *Alimit,double *dA,int deriv)
{
    /*tlist,vlist,angles -the asteroid shape
     * AO struct contains the AO data
     * offset naox2 vector, offsets,
     * D is the derivative matrix (dm x dn), derivatives of vertex coordinates wrt parameters
     * Weight is additional weighting terms for individual AO images, 1xnao vector (not implemented yet)
     * Scale additional scaling terms for each ao image.
     * Albedo nfac vector, optional
     * Alimit, albedo limit 2-vector
     * deriv==1, then the derivatives will be calculated
     * OUTPUT:
     * FTr,FTi real and imaginary results
     * Derivative matrix FTdvr (real) FTdvi (imag)
     */
    /* Denote the total number of data points by ntpoints. Then
     * FT is 2*ntpoints vector
     * FTdv is 2*ntpoints x (3*dn+3+2*nao) matrix =[real(FTdx)*D real(FTdy)*D real(FTdz)*D real(FTdA) real(FTdoff);
     * imag(FTdx)*D imag(FTdy)*D imag(FTdz)*D imag(FTdA) imag(FTdoff);...]
     * FTdS is an optional matrix for Scaling terms
     * dAr, dAi 2*ntpointsxnfac matrices, only if albedo is to be fitted
     * NOTE THAT FTdv is assumed to be initialized to zero
     */
    /* NOTE(review): in this function body the derivative outputs
     * (FTdv, FTdS, dA) and the deriv flag are never written/read; only the
     * residual vector FT is filled. Confirm the derivative path lives
     * elsewhere before relying on those outputs. */
    int DisNULL=0;
    int UseScale=0;
    int UseWeight=0;
    if(scale!=NULL)
        UseScale=1;
    int nao;
    nao=AOs->nao; /* number of AO images */

    /* sanity checking: if D is supplied, its row count must match nvert */
    if(D==NULL)
        DisNULL=1;
    if(!DisNULL && nvert!=dm)
    {
        puts("Error: nvert is not equal dm.");
        exit(1);
    }
    if(Weight!=NULL)
        UseWeight=1;

    int *nopoints,*cumpoints,ntpoints;
    nopoints=AOs->nobs; /* array: number of samples in each AO image */
    cumpoints=malloc((nao+1)*sizeof(int));
    cumpoints[0]=0;
    /* cumpoints is the cumulative sum of observation points, used for indexing */
    for(int i=1;i<=nao;i++)
        cumpoints[i]=cumpoints[i-1]+nopoints[i-1];
    ntpoints=cumpoints[nao]; /* total number of points */

    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel for
    for(int obsind=0;obsind<nao;obsind++)
    {
        double Scale=1;
        if(UseScale==1)
            Scale=exp(scale[obsind]);
        double *FTE,*FTE0,*FTTIME,*FTfreqx,*FTfreqy,*FTup,*FTdist,*datar,*datai;
        double *FTr;
        double *FTi;
        double *psfi;
        double *psfr;
        double W;
        if(UseWeight==1)
            W=Weight[obsind];
        else
            W=1;
        FTr=calloc(nopoints[obsind],sizeof(double));
        FTi=calloc(nopoints[obsind],sizeof(double));
        FTE=AOs->E+3*obsind;
        FTE0=AOs->E0+3*obsind;
        FTup=AOs->up+3*obsind;
        FTTIME=AOs->TIME+obsind;
        FTfreqx=AOs->freqx[obsind];
        FTfreqy=AOs->freqy[obsind];
        FTdist=AOs->distance+obsind;
        datar=AOs->datar[obsind];
        datai=AOs->datai[obsind];
        psfr=AOs->psfr[obsind];
        psfi=AOs->psfi[obsind];
        /* FIX: TB was a single variable declared outside the parallel loop and
         * written by every thread — a data race. It is now thread-local. */
        double TB=Calculate_AO(tlist,vlist,nfac,nvert,angles,FTE,FTE0,FTup,*FTTIME,*FTdist,FTfreqx,FTfreqy,nopoints[obsind],offset+2*obsind,FTr,FTi,Albedo,Alimit);
        if(psfr==NULL || psfi==NULL)
        {
            /* no PSF: residual = data - scaled model, real and imaginary parts */
            for(int j=0;j<nopoints[obsind];j++)
            {
                FT[j+cumpoints[obsind]]=W*(datar[j]-Scale*FTr[j]*TB/INI_AO_TOTAL_BRIGHT[obsind]);
                FT[j+cumpoints[obsind]+ntpoints]=W*(datai[j]-Scale*FTi[j]*TB/INI_AO_TOTAL_BRIGHT[obsind]);
            }
        }
        else
        {
            /* with PSF: model is multiplied by the complex PSF (psfr+i*psfi) */
            for(int j=0;j<nopoints[obsind];j++)
            {
                FT[j+cumpoints[obsind]]=W*(datar[j]-Scale*(psfr[j]*FTr[j]-psfi[j]*FTi[j])*TB/INI_AO_TOTAL_BRIGHT[obsind]);
                FT[j+cumpoints[obsind]+ntpoints]=W*(datai[j]-Scale*(psfi[j]*FTr[j]+psfr[j]*FTi[j])*TB/INI_AO_TOTAL_BRIGHT[obsind]);
            }
        }
        free(FTr);
        free(FTi);
    }
    free(cumpoints);
}
GB_unaryop__lnot_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp32_fp32 // op(A') function: GB_tran__lnot_fp32_fp32 // C type: float // A type: float // cast: float cij = (float) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp32_fp32 ( float *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
QLA_F3_c1_veq_V_dot_V.c
/**************** QLA_F3_c_veq_V_dot_V.c ********************/ #include <stdio.h> #include <qla_config.h> #include <qla_types.h> #include <qla_random.h> #include <qla_cmath.h> #include <qla_f3.h> #include <math.h> static void start_slice(){ __asm__ __volatile__ (""); } static void end_slice(){ __asm__ __volatile__ (""); } void QLA_F3_c_veq_V_dot_V ( QLA_F_Complex *restrict r, QLA_F3_ColorVector *restrict a, QLA_F3_ColorVector *restrict b, int n) { start_slice(); #ifdef HAVE_XLC #pragma disjoint(*r,*a,*b) __alignx(16,r); __alignx(16,a); __alignx(16,b); #endif QLA_D_Complex sum; QLA_c_eq_r(sum,0.); #pragma omp parallel { QLA_D_Complex sum_local; QLA_c_eq_r(sum_local,0.); #pragma omp for for(int i=0; i<n; i++) { for(int i_c=0; i_c<3; i_c++) { QLA_c_peq_ca_times_c(sum_local, QLA_DF_c(QLA_F3_elem_V(a[i],i_c)), QLA_DF_c(QLA_F3_elem_V(b[i],i_c))); } } #pragma omp critical { QLA_c_peq_c(sum,sum_local); } } QLA_FD_c_eq_c(*r,sum); end_slice(); }
ep.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - EP

  This benchmark is an OpenMP C version of the NPB EP code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
  in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------

  Author: P. O. Frederickson
          D. H. Bailey
          A. C. Woo

  OpenMP C version: S. Satoh
  3.0 structure translation: M. Popov

--------------------------------------------------------------------*/

#include "../common/npb-C.h"
#include "npbparams.h"

/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE

/* global variables */
/* common /storage/ */
static double x[2*NK];   /* batch of uniform randoms, consumed in pairs */
static double q[NQ];     /* annulus counts (tally of Gaussian deviates) */

/*--------------------------------------------------------------------
      program EMBAR
c-------------------------------------------------------------------*/
/*
c   This is the serial version of the APP Benchmark 1,
c   the "embarassingly parallel" benchmark.
c
c   M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c   numbers.  MK is the Log_2 of the size of each batch of uniform random
c   numbers.  MK can be set for convenience on a given system, since it does
c   not affect the results.
*/
int main(int argc, char **argv) {

    double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
    double dum[3] = { 1.0, 1.0, 1.0 };
    int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
	no_large_nodes, np_add, k_offset, j;
    int nthreads = 1;
    boolean verified;
    char size[13+1];	/* character*13 */

/*
c   Because the size of the problem is too large to store in a 32-bit
c   integer for some classes, we put it into a string (for printing).
c   Have to strip off the decimal point put in there by the floating
c   point print statement (internal file)
*/

    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	   " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
/* NOTE(review): parallelizing this 13-character scan buys nothing; the loop
 * index j is implicitly private, so it is at least race-free. */
#pragma omp parallel for
    for (j = 13; j >= 1; j--) {
	if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);
    verified = FALSE;

/*
c   Compute the number of "batches" of random number pairs generated
c   per processor. Adjust if the number of processors does not evenly
c   divide the total number
*/
    np = NN;

/*
c   Call the random number generator functions and initialize
c   the x-array to reduce the effects of paging on the timings.
c   Also, call all mathematical functions that are used. Make
c   sure these initializations cannot be eliminated as dead code.
*/
    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);
#pragma omp parallel for
    for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;
    Mops = log(sqrt(fabs(max(1.0, 1.0))));

    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);

    vranlc(0, &t1, A, x);

/*   Compute AN = A ^ (2 * NK) (mod 2^46). */

    t1 = A;

    for ( i = 1; i <= MK+1; i++) {
	t2 = randlc(&t1, t1);
    }

    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;

#pragma omp parallel for
    for ( i = 0; i <= NQ - 1; i++) {
	q[i] = 0.0;
    }

/*
c   Each instance of this loop may be performed independently. We compute
c   the k offsets separately to take into account the fact that some nodes
c   have more numbers to generate than others
*/
    k_offset = -1;

/* NOTE(review): in the reference NPB code this brace block is an
 * "omp parallel" region, making the declarations below private per thread.
 * Here it is a plain serial block, so t1..x2, kk, i, ik, l and qq[] are
 * SHARED by the "parallel for" over k — a data race; vranlc() also writes
 * the shared global x[] from every thread. Results with >1 thread are
 * unreliable until the parallel region is restored — confirm against the
 * reference NPB 3.0 EP source. */
    {
	double t1, t2, t3, t4, x1, x2;
	int kk, i, ik, l;
	double qq[NQ];		/* private copy of q[0:NQ-1] */

#pragma omp parallel for
	for (i = 0; i < NQ; i++) qq[i] = 0.0;

#pragma omp parallel for reduction(+:sx) reduction(+:sy)
	for (k = 1; k <= np; k++) {
	    kk = k_offset + k;
	    t1 = S;
	    t2 = an;

/*      Find starting seed t1 for this kk. */

	    for (i = 1; i <= 100; i++) {
		ik = kk / 2;
		if (2 * ik != kk) t3 = randlc(&t1, t2);
		if (ik == 0) break;
		t3 = randlc(&t2, t2);
		kk = ik;
	    }

/*      Compute uniform pseudorandom numbers. */

	    if (TIMERS_ENABLED == TRUE) timer_start(3);
	    vranlc(2*NK, &t1, A, x-1);
	    if (TIMERS_ENABLED == TRUE) timer_stop(3);

/*
c       Compute Gaussian deviates by acceptance-rejection method and
c       tally counts in concentric square annuli.  This loop is not
c       vectorizable.
*/
	    if (TIMERS_ENABLED == TRUE) timer_start(2);

	    for ( i = 0; i < NK; i++) {
		x1 = 2.0 * x[2*i] - 1.0;
		x2 = 2.0 * x[2*i+1] - 1.0;
		t1 = pow2(x1) + pow2(x2);
		if (t1 <= 1.0) {
		    t2 = sqrt(-2.0 * log(t1) / t1);
		    t3 = (x1 * t2);				/* Xi */
		    t4 = (x2 * t2);				/* Yi */
		    l = max(fabs(t3), fabs(t4));
		    qq[l] += 1.0;				/* counts */
		    sx = sx + t3;				/* sum of Xi */
		    sy = sy + t4;				/* sum of Yi */
		}
	    }
	    if (TIMERS_ENABLED == TRUE) timer_stop(2);
	}

	{
#pragma omp parallel for
	    for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
	}

#if defined(_OPENMP)
/* NOTE(review): omp_get_num_threads() outside a parallel region returns 1
 * per the OpenMP spec, so the reported thread count is always 1 here. */
	nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end of parallel region */

#pragma omp parallel for private(i) reduction(+:gc)
    for (i = 0; i <= NQ-1; i++) {
        gc = gc + q[i];
    }

    timer_stop(1);
    tm = timer_read(1);

    nit = 0;
/* Compare sums against the published reference values for each class. */
    if (M == 24) {
	if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
	   (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 25) {
	if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
	    (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 28) {
	if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
	    (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 30) {
	if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
	    (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    } else if (M == 32) {
	if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
	    (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
	    verified = TRUE;
	}
    }

    Mops = pow(2.0, M+1)/tm/1000000.0;

    printf("EP Benchmark Results: \n"
	   "CPU Time = %10.4f\n"
	   "N = 2^%5d\n"
	   "No. Gaussian Pairs = %15.0f\n"
	   "Sums = %25.15e %25.15e\n"
	   "Counts:\n",
	   tm, M, gc, sx, sy);
    for (i = 0; i  <= NQ-1; i++) {
	printf("%3d %15.0f\n", i, q[i]);
    }

    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
		    tm, Mops,
		    "Random numbers generated",
		    verified, NPBVERSION, COMPILETIME,
		    CS1, CS2, CS3, CS4, CS5, CS6, CS7);

    if (TIMERS_ENABLED == TRUE) {
	printf("Total time:     %f", timer_read(1));
	printf("Gaussian pairs: %f", timer_read(2));
	printf("Random numbers: %f", timer_read(3));
    }
}
Example_tasking.14.c
/* * @@name: tasking.14c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_3.1 */ void bar(void); void foo ( ) { int i; #pragma omp task if(0) // This task is undeferred { #pragma omp task // This task is a regular task for (i = 0; i < 3; i++) { #pragma omp task // This task is a regular task bar(); } } #pragma omp task final(1) // This task is a regular task { #pragma omp task // This task is included for (i = 0; i < 3; i++) { #pragma omp task // This task is also included bar(); } } }
GB_unaryop__minv_fp64_int8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp64_int8
// op(A') function:  GB_tran__minv_fp64_int8

// C type:   double
// A type:   int8_t
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

// type of the A matrix entries
#define GB_ATYPE int8_t

// type of the C matrix entries
#define GB_CTYPE double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: floating-point reciprocal
#define GB_OP(z, x) z = 1./x ;

// casting: int8_t to double
#define GB_CASTING(z, x) double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the unary op to all anz entries of Ax, writing results to Cx.
// Each of the nthreads OpenMP threads handles one static contiguous slice.
GrB_Info GB_unop__minv_fp64_int8
(
    double *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above.
GrB_Info GB_tran__minv_fp64_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrix_stat.h
#ifndef MATRIX_STAT_H_ #define MATRIX_STAT_H_ #include <vector> #include <algorithm> namespace acspo { template <typename T> double sum(const matrix<T> &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } return ret; } template <typename T> double mean(const matrix<T> &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template <typename T> double var(const matrix<T> &mat, double avg) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += (mat(i)-avg)*(mat(i)-avg); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template <typename T> double var(const matrix<T> &mat) { return var(mat, mean(mat)); } template <typename T> double std_dev(const matrix<T> &mat, double avg) { return std::sqrt(var(mat, avg)); } template <typename T> double std_dev(const matrix<T> &mat) { return std::sqrt(var(mat)); } template <typename T> double med(const matrix<T> &mat) { unsigned int elem = mat.elem(); std::vector<double> buf; buf.reserve(elem); for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { buf.push_back(mat(i)); } } if (buf.size() == 0) { return NAN; } std::sort(buf.begin(), buf.end()); if (buf.size() % 2 == 1) { return buf[(buf.size()-1)/2]; } else { return 0.5*(buf[buf.size()/2-1]+buf[buf.size()/2]); } } } #endif
omp-for-private.c
#include <stdio.h>

/*
 * Demonstrates the OpenMP private() clause: each thread gets its own copy
 * of j inside the parallel loop, so the j declared here — initialized to -1
 * — is untouched once the region ends.
 */
int main()
{
  int i;
  int j = -1;

  #pragma omp parallel for private(j)
  for (i = 0; i < 11; ++i)
  {
    printf("Hello World %d\n", i);
    /* Assign before reading: a private j starts out uninitialized. */
    j = i;
    printf("j = %d\n", j);
  }

  /* Still -1: the per-thread copies of j were discarded. */
  printf("Outside the Parallel Region: j = %d\n", j);
  return 0;
}
hermv_c_dia_u_lo.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Complex *x, const ALPHA_Complex beta, ALPHA_Complex *y) { const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if(dis < 0) { const ALPHA_INT row_start = -dis; const ALPHA_INT col_start = 0; const ALPHA_INT nnz = m + dis; const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Complex v,v_c; ALPHA_Complex val_orig = A->values[start + row_start + j]; ALPHA_Complex val_conj = {val_orig.real,-val_orig.imag}; alpha_mul(v, alpha, val_orig); alpha_mul(v_c, alpha, val_conj); alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]); alpha_madde(tmp[threadId][col_start + j], v_c, x[row_start + j]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); alpha_madde(y[i], alpha, x[i]); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_subassign_12.c
//------------------------------------------------------------------------------
// GB_subassign_12: C(I,J)<M,repl> += A ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 12: C(I,J)<M,repl> += A ; using S

// M:           present
// Mask_comp:   false
// C_replace:   true
// accum:       present
// A:           matrix
// S:           constructed

// Terminology (GraphBLAS internals): a "zombie" is an entry of C marked for
// lazy deletion; a "pending tuple" is an entry queued for later insertion.
// Phase 1 below creates zombies and counts pending tuples; phase 2 inserts
// the pending tuples.

#define GB_FREE_WORK GB_FREE_TWO_SLICE

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_12
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,         // if true, use the only structure of M
    const GrB_BinaryOp accum,
    const GrB_Matrix A,
    const GrB_Matrix S,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;
    GB_GET_MASK ;
    const bool M_is_hyper = M->is_hyper ;
    const int64_t Mnvec = M->nvec ;
    const int64_t mvlen = M->vlen ;
    GB_GET_A ;
    GB_GET_S ;
    GB_GET_ACCUM ;

    //--------------------------------------------------------------------------
    // Method 12: C(I,J)<M,repl> += A ; using S
    //--------------------------------------------------------------------------

    // Time: all entries in S+A must be traversed, so Omega(nnz(S)+nnz(A)) is
    // required.  All cases of the mask (0, 1, or not present) must be
    // considered, because of the C_replace descriptor being true.

    // Method 12 and Method 20 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: Z=A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_TWO_SLICE (A, S) ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get A(:,j) and S(:,j)
            //------------------------------------------------------------------

            // Zh is the hyperlist of the Z=A+S slice; NULL means k == j.
            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
            GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            int64_t pM_start, pM_end ;
            GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
            // a dense M(:,j) allows O(1) mask lookup instead of binary search
            bool mjdense = (pM_end - pM_start) == mvlen ;

            //------------------------------------------------------------------
            // do a 2-way merge of S(:,j) and A(:,j)
            //------------------------------------------------------------------

            // jC = J [j] ; or J is a colon expression
            // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            // while both list S (:,j) and A (:,j) have entries
            while (pS < pS_end && pA < pA_end)
            {
                int64_t iS = Si [pS] ;
                int64_t iA = Ai [pA] ;

                if (iS < iA)
                {
                    // S (i,j) is present but A (i,j) is not
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iS) ;
                    if (!mij)
                    {
                        // ----[C . 0] or [X . 0]-------------------------------
                        // [X . 0]: action: ( X ): still a zombie
                        // [C . 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                    }
                    GB_NEXT (S) ;
                }
                else if (iA < iS)
                {
                    // S (i,j) is not present, A (i,j) is present
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (A) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    GB_C_S_LOOKUP ;
                    if (mij)
                    {
                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): A to C no accum
                        // [C A 1]: action: ( =C+A ): apply accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_withaccum_C_A_1_matrix ;
                    }
                    else
                    {
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_DELETE_ENTRY ;
                    }
                    GB_NEXT (S) ;
                    GB_NEXT (A) ;
                }
            }

            // while list S (:,j) has entries.  List A (:,j) exhausted
            while (pS < pS_end)
            {
                int64_t iS = Si [pS] ;
                GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iS) ;
                if (!mij)
                {
                    // ----[C . 0] or [X . 0]-----------------------------------
                    // [X . 0]: action: ( X ): still a zombie
                    // [C . 0]: C_repl: action: ( delete ): becomes zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                }
                GB_NEXT (S) ;
            }

            // while list A (:,j) has entries.  List S (:,j) exhausted
            while (pA < pA_end)
            {
                // S (i,j) is not present, A (i,j) is present
                int64_t iA = Ai [pA] ;
                GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                if (mij)
                {
                    // ----[. A 1]----------------------------------------------
                    // [. A 1]: action: ( insert )
                    task_pending++ ;
                }
                GB_NEXT (A) ;
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get A(:,j) and S(:,j)
            //------------------------------------------------------------------

            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
            GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;

            //------------------------------------------------------------------
            // get M(:,j)
            //------------------------------------------------------------------

            int64_t pM_start, pM_end ;
            GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
            bool mjdense = (pM_end - pM_start) == mvlen ;

            //------------------------------------------------------------------
            // do a 2-way merge of S(:,j) and A(:,j)
            //------------------------------------------------------------------

            // jC = J [j] ; or J is a colon expression
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            // while both list S (:,j) and A (:,j) have entries
            while (pS < pS_end && pA < pA_end)
            {
                int64_t iS = Si [pS] ;
                int64_t iA = Ai [pA] ;

                if (iS < iA)
                {
                    // S (i,j) is present but A (i,j) is not
                    GB_NEXT (S) ;
                }
                else if (iA < iS)
                {
                    // S (i,j) is not present, A (i,j) is present
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        // iC/jC are consumed by GB_PENDING_INSERT
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT (Ax +(pA*asize)) ;
                    }
                    GB_NEXT (A) ;
                }
                else
                {
                    // both S (i,j) and A (i,j) present
                    GB_NEXT (S) ;
                    GB_NEXT (A) ;
                }
            }

            // while list A (:,j) has entries.  List S (:,j) exhausted
            while (pA < pA_end)
            {
                // S (i,j) is not present, A (i,j) is present
                int64_t iA = Ai [pA] ;
                GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                if (mij)
                {
                    // ----[. A 1]----------------------------------------------
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (Ax +(pA*asize)) ;
                }
                GB_NEXT (A) ;
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
wand-view.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                       W   W   AAA   N   N  DDDD                             %
%                       W   W  A   A  NN  N  D   D                            %
%                       W W W  AAAAA  N N N  D   D                            %
%                       WW WW  A   A  N  NN  D   D                            %
%                       W   W  A   A  N   N  DDDD                             %
%                                                                             %
%                        V   V  IIIII  EEEEE  W   W                           %
%                        V   V    I    E      W   W                           %
%                        V   V    I    EEE    W W W                           %
%                         V V     I    E      WW WW                           %
%                          V    IIIII  EEEEE  W   W                           %
%                                                                             %
%                        MagickWand Wand View Methods                         %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 March 2003                                  %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.          %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/wand.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"

/*
  Define declarations.
*/
#define WandViewId  "WandView"

/*
  Typedef declarations.
*/
struct _WandView
{
  size_t
    id;                         /* unique id from AcquireWandId() */

  char
    name[MagickPathExtent],     /* "WandView-<id>" */
    *description;               /* progress-monitor label */

  RectangleInfo
    extent;                     /* region of the image the view covers */

  MagickWand
    *wand;                      /* owning wand (source of ->images) */

  Image
    *image;

  CacheView
    *view;

  PixelWand
    ***pixel_wands;             /* per-OpenMP-thread rows of pixel wands */

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  size_t
    signature;                  /* MagickWandSignature while valid */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   C l o n e W a n d V i e w                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneWandView() makes a copy of the specified wand view.
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) memset(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->image=CloneImage(wand_view->image,0,0,MagickTrue,
    wand_view->exception);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /*
    NOTE(review): clone_view->pixel_wands is still NULL here (memset above)
    but the loop below stores into clone_view->pixel_wands[i] — this looks
    like a missing per-thread table allocation (cf. AcquirePixelsThreadSet
    later in this file).  Also clone_view->wand is never assigned, so the
    clone's iterators would dereference a NULL wand.  Confirm against the
    upstream ImageMagick sources before relying on CloneWandView.
  */
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=MagickWandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

/*
  Release every per-thread row of pixel wands, then the table itself.
  NOTE(review): the row count iterated here is
  GetMagickResourceLimit(ThreadResource), while AcquirePixelsThreadSet
  allocates GetOpenMPMaximumThreads() rows — presumably these agree at
  runtime; confirm, otherwise rows could leak or be over-read.
*/
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  /* each row holds extent.width pixel wands (one per column) */
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width);
  wand_view->image=DestroyImage(wand_view->image);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  /* invalidate the signature so stale pointers fail the asserts above */
  wand_view->signature=(~MagickWandSignature);
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  Image
    *destination_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  destination_image=destination->wand->images;
  /* destination must be writable pixel-by-pixel */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict duplex_pixels,
      *magick_restrict pixels;

    register ssize_t
      x;

    register Quantum
      *magick_restrict destination_pixels;

    /* another row already failed: skip remaining work for this row */
    if (status == MagickFalse)
      continue;
    /* stage the source row into this thread's pixel wands */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /* stage the duplex row */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
    {
      PixelSetQuantumPixel(duplex->image,duplex_pixels,
        duplex->pixel_wands[id][x]);
      duplex_pixels+=GetPixelChannels(duplex->image);
    }
    /* stage the destination row (authentic: will be written back) */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    if (destination_pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelSetQuantumPixel(destination->image,destination_pixels,
        destination->pixel_wands[id][x]);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    /* user callback operates on the staged pixel wands */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* copy the (possibly modified) destination wands back to the cache */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,
      destination->exception);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        destination_pixels);
      destination_pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *description;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  *severity=wand_view->exception->severity;
  /* caller owns the returned string and must relinquish it */
  description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
    sizeof(*description));
  if (description == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *description='\0';
  if (wand_view->exception->reason != (char *) NULL)
    (void) CopyMagickString(description,GetLocaleExceptionMessage(
      wand_view->exception->severity,wand_view->exception->reason),
      MagickPathExtent);
  if (wand_view->exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(description," (",MagickPathExtent);
      (void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
        wand_view->exception->severity,wand_view->exception->description),
        MagickPathExtent);
      (void) ConcatenateMagickString(description,")",MagickPathExtent);
    }
  return(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == MagickWandSignature);
  return(wand_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *pixels;

    register ssize_t
      x;

    /* a previous row failed: skip the rest (cannot break a parallel for) */
    if (status == MagickFalse)
      continue;
    /* read-only row: virtual pixels, never synced back */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* stage the row into this thread's pixel wands for the callback */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
% % The format of the GetWandViewPixels method is: % % PixelWand *GetWandViewPixels(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport PixelWand **GetWandViewPixels(const WandView *wand_view) { const int id = GetOpenMPThreadId(); assert(wand_view != (WandView *) NULL); assert(wand_view->signature == MagickWandSignature); return(wand_view->pixel_wands[id]); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t W a n d V i e w W a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetWandViewWand() returns the magick wand associated with the wand view. % % The format of the GetWandViewWand method is: % % MagickWand *GetWandViewWand(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. % */ WandExport MagickWand *GetWandViewWand(const WandView *wand_view) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == MagickWandSignature); return(wand_view->wand); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s W a n d V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsWandView() returns MagickTrue if the the parameter is verified as a wand % view object. % % The format of the IsWandView method is: % % MagickBooleanType IsWandView(const WandView *wand_view) % % A description of each parameter follows: % % o wand_view: the wand view. 
% */
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  /* Unlike the accessors above, a NULL or foreign pointer is not fatal. */
  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != MagickWandSignature)
    return(MagickFalse);
  /* The name is stamped with the WandViewId prefix by NewWandView(). */
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  Allocate one array of pixel wands per OpenMP thread so iterator callbacks
  can use pixel_wands[thread_id] without locking.  Returns NULL on failure
  (partially-built sets are released via DestroyPixelsThreadSet).
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero first so DestroyPixelsThreadSet can tell built from unbuilt rows. */
  (void) memset(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  ExceptionInfo
    *exception;

  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == MagickWandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) memset(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  /* Name carries the WandViewId prefix; IsWandView() checks for it. */
  (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* wand must be assigned before the cache view reads wand->images. */
  wand_view->wand=wand;
  exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception);
  /* Default extent: the full canvas of the current image. */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width);
  wand_view->exception=exception;
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=MagickWandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of an extent of
%      pixel_wands view.
% */ WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x, const ssize_t y,const size_t width,const size_t height) { ExceptionInfo *exception; WandView *wand_view; assert(wand != (MagickWand *) NULL); assert(wand->signature == MagickWandSignature); wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view)); if (wand_view == (WandView *) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); (void) memset(wand_view,0,sizeof(*wand_view)); wand_view->id=AcquireWandId(); (void) FormatLocaleString(wand_view->name,MagickPathExtent,"%s-%.20g", WandViewId,(double) wand_view->id); wand_view->description=ConstantString("WandView"); exception=AcquireExceptionInfo(); wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,exception); wand_view->wand=wand; wand_view->extent.width=width; wand_view->extent.height=height; wand_view->extent.x=x; wand_view->extent.y=y; wand_view->exception=exception; wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width); if (wand_view->pixel_wands == (PixelWand ***) NULL) ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed", GetExceptionMessage(errno)); wand_view->debug=IsEventLogging(); wand_view->signature=MagickWandSignature; return(wand_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewDescription() associates a description with an image view. % % The format of the SetWandViewDescription method is: % % void SetWandViewDescription(WandView *image_view,const char *description) % % A description of each parameter follows: % % o wand_view: the wand view. % % o description: the wand view description. 
% */ MagickExport void SetWandViewDescription(WandView *wand_view, const char *description) { assert(wand_view != (WandView *) NULL); assert(wand_view->signature == MagickWandSignature); wand_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewIterator() iterates over the wand view in parallel and calls % your set method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. The pixels are initiallly % undefined and any settings you make in the callback method are automagically % synced back to your image. % % The callback signature is: % % MagickBooleanType SetImageViewMethod(ImageView *destination, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback set method that must be % executed by a single thread at a time. % % The format of the SetWandViewIterator method is: % % MagickBooleanType SetWandViewIterator(WandView *destination, % SetWandViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the wand view. % % o set: the set callback method. % % o context: the user defined context. 
% */
WandExport MagickBooleanType SetWandViewIterator(WandView *destination,
  SetWandViewMethod set,void *context)
{
  Image
    *destination_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(destination != (WandView *) NULL);
  assert(destination->signature == MagickWandSignature);
  if (set == (SetWandViewMethod) NULL)
    return(MagickFalse);
  destination_image=destination->wand->images;
  /* Writing pixels requires DirectClass storage. */
  status=SetImageStorageClass(destination_image,DirectClass,
    destination->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): unsigned subtraction wraps if extent.y > extent.height. */
  height=destination->extent.height-destination->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(destination_image,destination_image,height,1)
#endif
  for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's pixel-wand row */

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    /* Authentic pixels: the scanline will be written back via sync below. */
    pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
      y,destination->extent.width,1,destination->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Callback fills this thread's pixel wands... */
    if (set(destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* ...which are then copied into the authentic scanline. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
    {
      PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x],
        pixels);
      pixels+=GetPixelChannels(destination->image);
    }
    sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): progress is read outside the atomic region. */
        proceed=SetImageProgress(destination_image,destination->description,
          progress,destination->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f e r W a n d V i e w I t e r a t o r                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferWandViewIterator() iterates over two wand views in parallel and
%  calls your transfer method for each scanline of the view.  The source pixel
%  extent is not confined to the image canvas-- that is you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  However, the destination wand view is confined to the image canvas-- that
%  is no negative offsets or widths or heights that exceed the image dimension
%  are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType TransferImageViewMethod(const WandView *source,
%        WandView *destination,const ssize_t y,const int thread_id,
%        void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the TransferWandViewIterator method is:
%
%      MagickBooleanType TransferWandViewIterator(WandView *source,
%        WandView *destination,TransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == MagickWandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict pixels; register ssize_t x; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source->extent.width; x++) { PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]); pixels+=GetPixelChannels(source->image); } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelSetQuantumPixel(destination->image,destination_pixels, destination->pixel_wands[id][x]); 
destination_pixels+=GetPixelChannels(destination->image); } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); for (x=0; x < (ssize_t) destination->extent.width; x++) { PixelGetQuantumPixel(destination->image,destination->pixel_wands[id][x], destination_pixels); destination_pixels+=GetPixelChannels(destination->image); } sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(source_image,source->description,progress, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. 
%
%  The format of the UpdateWandViewIterator method is:
%
%      MagickBooleanType UpdateWandViewIterator(WandView *source,
%        UpdateWandViewMethod update,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
% */
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == MagickWandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  /* Pixels are written back, so DirectClass storage is required. */
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): unsigned subtraction wraps if extent.y > extent.height. */
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* selects this thread's pixel-wand row */

    MagickBooleanType
      sync;

    register ssize_t
      x;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Load the scanline into this thread's pixel wands... */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelSetQuantumPixel(source->image,pixels,source->pixel_wands[id][x]);
      pixels+=GetPixelChannels(source->image);
    }
    /* ...let the callback mutate them... */
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* ...then write the (possibly updated) wands back to the scanline.
       Note pixels was advanced past the row above; PixelGetQuantumPixel
       continues from there, so this relies on the same stride pattern --
       NOTE(review): confirm this pointer reuse is intentional upstream. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
    {
      PixelGetQuantumPixel(source->image,source->pixel_wands[id][x],pixels);
      pixels+=GetPixelChannels(source->image);
    }
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): progress is read outside the atomic region. */
        proceed=SetImageProgress(source_image,source->description,progress,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
MulticolorGaussSeidelSMatTranspose.c
#include "mex.h"
#include <omp.h>

/*
  Multicolor (red-black style) Gauss-Seidel smoother, MEX entry point.

  Inputs (prhs):
    [0] sparse matrix, accessed column-wise via Ir/Jc/Pr -- the caller passes
        the TRANSPOSE so each "column" here is a row of the original matrix
        (assumption from the file name; confirm against the MATLAB caller)
    [1] x_in      initial iterate
    [2] b         right-hand side
    [3] invDiag   reciprocal diagonal entries
    [4] nu        number of smoothing sweeps
    [5] indicesOrder  unknown indices grouped by color (stored as doubles)
    [6] colorStarts   offsets of each color group in indicesOrder

  Output (plhs):
    [0] x_out     smoothed iterate, n x 1

  Each thread owns a contiguous slice of every color group; barriers separate
  the compute phase (x_out from aux) and the copy-back phase (aux = x_out),
  so within one color all reads see the previous color's completed values.
*/
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    //mex -largeArrayDims MulticolorGaussSeidelSMatTranspose.c COMPFLAGS="$COMPFLAGS -openmp" LINKFALGS="$LINKFALGS -openmp"
    mwIndex i,j,i_color;
    int id, Nthrds, istart, iend,k;
    long idx_in_color;
    mwIndex *C_t = mxGetIr(prhs[0]);       /* row indices of nonzeros */
    mwIndex *starts_t = mxGetJc(prhs[0]);  /* column start offsets */
    double* vals_t = mxGetPr(prhs[0]);     /* nonzero values */
    mwIndex n = mxGetN(prhs[0]);
    double temp;
    double* x_in = mxGetPr(prhs[1]);
    double* b = mxGetPr(prhs[2]);
    double* invDiag = mxGetPr(prhs[3]);
    int nu = (int)(*mxGetPr(prhs[4]));
    double* indicesOrder = mxGetPr(prhs[5]);
    double* colorStarts = mxGetPr(prhs[6]);
    /* NOTE(review): `max` is not standard C -- this compiles with MSVC's
       <stdlib.h> macro but not elsewhere; consider an explicit ternary. */
    mwIndex n_colors = max(mxGetM(prhs[6]),mxGetN(prhs[6]))-1;
    /* Output Variables */
    double* x_out = 0;
    /* NOTE(review): malloc result is not checked for NULL. */
    double* aux = (double*)malloc(n*sizeof(double));
    plhs[0] = mxCreateDoubleMatrix(n, 1, mxREAL);
    x_out = mxGetPr(plhs[0]);
    /* Program */
    #pragma omp parallel shared(colorStarts, indicesOrder,vals_t,C_t,starts_t,invDiag,x_in,b,aux) private(i_color,idx_in_color,k,i,j,temp,id, Nthrds, istart, iend) num_threads(omp_get_num_procs()/2)
    {
        //printf("%d",);
        id = omp_get_thread_num();
        Nthrds = omp_get_num_threads();
        /* Static partition of [0,n) for the initial copy aux = x_in;
           the last thread absorbs the remainder. */
        istart = id * n / Nthrds;
        iend = (id+1) * n / Nthrds;
        if (id == Nthrds-1)iend = n;
        for ( i = istart ; i < iend ; ++i){
            aux[i] = x_in[i];
        }
        for (k = 0 ; k < nu ; k++){  /* nu smoothing sweeps */
            #pragma omp barrier
            for ( i_color = 0 ; i_color < n_colors ; i_color++){
                /* Partition this color's index range across threads.
                   NOTE(review): colorStarts holds doubles; the arithmetic
                   below truncates through int -- verify for large n. */
                istart = id * ((colorStarts[i_color+1] - colorStarts[i_color]) / Nthrds) + colorStarts[i_color];
                iend = (id+1) * ((colorStarts[i_color+1] - colorStarts[i_color]) / Nthrds) + colorStarts[i_color];
                if (id == Nthrds-1)iend = colorStarts[i_color+1];
                for ( idx_in_color = istart ; idx_in_color < iend ; ++idx_in_color){
                    i = indicesOrder[idx_in_color];
                    temp = 0.0;
                    /* temp = (A aux)_i using the transposed storage. */
                    for (j = starts_t[i] ; j < starts_t[i+1] ; ++j) {
                        temp += vals_t[j]*aux[C_t[j]];
                    }
                    /* Jacobi-style update within the color. */
                    x_out[i] = aux[i] + (b[i] - temp)*invDiag[i];
                }
                #pragma omp barrier
                /* Publish this color's results before the next color reads. */
                for ( idx_in_color = istart ; idx_in_color < iend ; ++idx_in_color){
                    i = indicesOrder[idx_in_color];
                    aux[i] = x_out[i];
                }
                #pragma omp barrier
            }
        }
    }
    free(aux);
}
interp_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haitao@openailab.com */ #include "interp_kernel_arm.h" #include "utility/sys_port.h" #include <math.h> #include <arm_neon.h> #define MIN(a, b) ((a) < (b) ? (a) : (b)) static void linear_coeffs(int w, int outw, int* xofs, float* alpha) { double scale = ( double )w / outw; for (int dx = 0; dx < outw; dx++) { float fx = ( float )((dx) * scale); int sx = floor(fx); fx -= sx; if (sx < 0) { sx = 0; fx = 0.f; } if (sx >= w - 1) { sx = w - 2; fx = 1.f; } xofs[dx] = sx; alpha[dx * 2] = 1.f - fx; alpha[dx * 2 + 1] = fx; } } static void resize_bilinear_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h, int out_w, int in_h, int in_w) { int w = out_w; // dst.w; int h = out_h; // dst.h; // loop body float* rowsbuf0 = ( float* )sys_malloc(w * sizeof(float)); float* rowsbuf1 = ( float* )sys_malloc(w * sizeof(float)); float* rows0 = rowsbuf0; float* rows1 = rowsbuf1; int prev_sy1 = -2; for (int dy = 0; dy < h; dy++) { int sy = yofs[dy]; if (sy == prev_sy1) { // reuse all rows } else if (sy == prev_sy1 + 1) { // hresize one row float* rows0_old = rows0; rows0 = rows1; rows1 = rows0_old; const float* S1 = src + (sy + 1) * in_w; // 
src.row(sy+1); const float* alphap = alpha; float* rows1p = rows1; // neon for (int dx = 0; dx + 1 < w; dx += 2) { int sx = xofs[dx]; int sxn = xofs[dx + 1]; const float* S1p = S1 + sx; const float* S1np = S1 + sxn; float32x4_t _a = vld1q_f32(alphap); float32x2_t _S1 = vld1_f32(S1p); float32x2_t _S1n = vld1_f32(S1np); float32x4_t _S1S1n = vcombine_f32(_S1, _S1n); float32x4_t _ms1 = vmulq_f32(_S1S1n, _a); float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1)); vst1_f32(rows1p + dx, _rows1); alphap += 4; } } else { // hresize two rows const float* S0 = src + sy * in_w; // src.row(sy); const float* S1 = src + (sy + 1) * in_w; // src.row(sy+1); const float* alphap = alpha; float* rows0p = rows0; float* rows1p = rows1; for (int dx = 0; dx + 1 < w; dx += 2) { int sx = xofs[dx]; int sxn = xofs[dx + 1]; const float* S0p = S0 + sx; const float* S1p = S1 + sx; const float* S0np = S0 + sxn; const float* S1np = S1 + sxn; float32x4_t _a = vld1q_f32(alphap); float32x2_t _S0 = vld1_f32(S0p); float32x2_t _S1 = vld1_f32(S1p); float32x2_t _S0n = vld1_f32(S0np); float32x2_t _S1n = vld1_f32(S1np); float32x4_t _S0S0n = vcombine_f32(_S0, _S0n); float32x4_t _S1S1n = vcombine_f32(_S1, _S1n); float32x4_t _ms0 = vmulq_f32(_S0S0n, _a); float32x4_t _ms1 = vmulq_f32(_S1S1n, _a); float32x2_t _rows0 = vpadd_f32(vget_low_f32(_ms0), vget_high_f32(_ms0)); float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1)); vst1_f32(rows0p + dx, _rows0); vst1_f32(rows1p + dx, _rows1); alphap += 4; } } prev_sy1 = sy; // vresize float b0 = beta[0]; float b1 = beta[1]; float* rows0p = rows0; float* rows1p = rows1; float* Dp = dst + dy * out_w; // dst.row(dy); int nn = w >> 3; int remain = w - (nn << 3); float32x4_t _b0 = vdupq_n_f32(b0); float32x4_t _b1 = vdupq_n_f32(b1); for (; nn > 0; nn--) { float32x4_t _rows0 = vld1q_f32(rows0p); float32x4_t _rows1 = vld1q_f32(rows1p); float32x4_t _D = vmulq_f32(_rows0, _b0); _D = vmlaq_f32(_D, _rows1, _b1); vst1q_f32(Dp, _D); float32x4_t 
_rows0n = vld1q_f32(rows0p + 4); /* tail of resize_bilinear_image (NEON path): second half of an 8-wide vertical blend */
        float32x4_t _rows1n = vld1q_f32(rows1p + 4);

        float32x4_t _Dn = vmulq_f32(_rows0n, _b0);
        _Dn = vmlaq_f32(_Dn, _rows1n, _b1); /* D = rows0*b0 + rows1*b1 */
        vst1q_f32(Dp + 4, _Dn);

        Dp += 8;
        rows0p += 8;
        rows1p += 8;
    }

    /* scalar remainder of the vertical pass */
    for (; remain; --remain)
    {
        *Dp++ = *rows0p++ * b0 + *rows1p++ * b1;
    }

    beta += 2; /* advance to the next row's pair of vertical weights */
}

sys_free(rowsbuf0);
sys_free(rowsbuf1);
}

/* Keys-style cubic interpolation kernel (A = -0.75).
 * Given the fractional offset fx in [0,1), writes the 4 tap weights for
 * samples at offsets {-1, 0, +1, +2} into coeffs[0..3].
 * coeffs[3] is derived so the four weights always sum to exactly 1. */
static inline void interpolate_cubic(float fx, float* coeffs)
{
    const float A = -0.75f;

    float fx0 = fx + 1; /* distance to the -1 tap */
    float fx1 = fx;     /* distance to the  0 tap */
    float fx2 = 1 - fx; /* distance to the +1 tap */

    coeffs[0] = A * fx0 * fx0 * fx0 - 5 * A * fx0 * fx0 + 8 * A * fx0 - 4 * A;
    coeffs[1] = (A + 2) * fx1 * fx1 * fx1 - (A + 3) * fx1 * fx1 + 1;
    coeffs[2] = (A + 2) * fx2 * fx2 * fx2 - (A + 3) * fx2 * fx2 + 1;
    coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
}

/* Precompute, for each of the outw destination columns (or rows), the source
 * anchor index xofs[dx] and the 4 cubic tap weights alpha[dx*4 .. dx*4+3].
 * The four if-blocks fold out-of-range taps (sx-1 < 0 or sx+2 > w-1) back
 * into the valid range by shifting weights between neighboring taps, then
 * clamp sx so that sx-1 .. sx+2 are all legal source indices. */
static void cubic_coeffs(int w, int outw, int* xofs, float* alpha)
{
    double scale = ( double )w / outw;

    for (int dx = 0; dx < outw; dx++)
    {
        /* half-pixel-centered mapping from destination to source coords */
        float fx = ( float )((dx + 0.5) * scale - 0.5);
        int sx = floor(fx); /* floor (from <math.h>) handles negative fx correctly */
        fx -= sx;

        interpolate_cubic(fx, alpha + dx * 4);

        if (sx <= -1)
        {
            /* all taps left of the image: collapse onto column 0 */
            sx = 1;
            alpha[dx * 4 + 0] = 1.f - alpha[dx * 4 + 3];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 3];
            alpha[dx * 4 + 2] = 0.f;
            alpha[dx * 4 + 3] = 0.f;
        }
        if (sx == 0)
        {
            /* -1 tap is out of range: merge it into the 0 tap */
            sx = 1;
            alpha[dx * 4 + 0] = alpha[dx * 4 + 0] + alpha[dx * 4 + 1];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 2];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 3];
            alpha[dx * 4 + 3] = 0.f;
        }
        if (sx == w - 2)
        {
            /* +2 tap is out of range: merge it into the +1 tap */
            sx = w - 3;
            alpha[dx * 4 + 3] = alpha[dx * 4 + 2] + alpha[dx * 4 + 3];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 1];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 0];
            alpha[dx * 4 + 0] = 0.f;
        }
        if (sx >= w - 1)
        {
            /* all taps right of the image: collapse onto column w-1 */
            sx = w - 3;
            alpha[dx * 4 + 3] = 1.f - alpha[dx * 4 + 0];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 0];
            alpha[dx * 4 + 1] = 0.f;
            alpha[dx * 4 + 0] = 0.f;
        }

        xofs[dx] = sx;
    }
}

/* Bicubic resize of one in_h x in_w plane into out_h x out_w.
 * Separable: for each output row, horizontally resample up to 4 source rows
 * into rowsbuf0..3, then blend them vertically with beta[0..3].
 * prev_sy1 tracks the anchor row of the previous iteration so already
 * hresize'd rows are recycled by rotating the rows0..rows3 pointers;
 * only the rows not seen last iteration are recomputed. */
static void resize_bicubic_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h, int out_w,
                                 int in_h, int in_w)
{
    int w = out_w; // dst.w;
    int h = out_h; // dst.h;

    // loop body
    float* rowsbuf0 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf1 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf2 = ( float* )sys_malloc(w * sizeof(float));
    float* rowsbuf3 = ( float* )sys_malloc(w * sizeof(float));
    float* rows0 = rowsbuf0;
    float* rows1 = rowsbuf1;
    float* rows2 = rowsbuf2;
    float* rows3 = rowsbuf3;

    int prev_sy1 = -3; /* forces the "hresize four rows" branch on dy == 0 */

    for (int dy = 0; dy < h; dy++)
    {
        int sy = yofs[dy];

        if (sy == prev_sy1)
        {
            // reuse all rows
        }
        else if (sy == prev_sy1 + 1)
        {
            // hresize one row: rotate buffers by 1, recompute only rows3
            float* rows0_old = rows0;
            rows0 = rows1;
            rows1 = rows2;
            rows2 = rows3;
            rows3 = rows0_old;
            const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);

            const float* alphap = alpha;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                /* 4-tap horizontal filter centered on sx (taps -1..+2) */
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else if (sy == prev_sy1 + 2)
        {
            // hresize two rows: rotate buffers by 2, recompute rows2 and rows3
            float* rows0_old = rows0;
            float* rows1_old = rows1;
            rows0 = rows2;
            rows1 = rows3;
            rows2 = rows0_old;
            rows3 = rows1_old;
            const float* S2 = src + (sy + 1) * in_w; // src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);

            const float* alphap = alpha;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else if (sy == prev_sy1 + 3)
        {
            // hresize three rows: keep only old rows3 (as new rows0)
            float* rows0_old = rows0;
            float* rows1_old = rows1;
            float* rows2_old = rows2;
            rows0 = rows3;
            rows1 = rows0_old;
            rows2 = rows1_old;
            rows3 = rows2_old;
            const float* S1 = src + sy * in_w;       // src.row(sy);
            const float* S2 = src + (sy + 1) * in_w; // src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);

            const float* alphap = alpha;
            float* rows1p = rows1;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S1p = S1 + sx;
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else
        {
            // hresize four rows: nothing reusable, recompute all buffers
            const float* S0 = src + (sy - 1) * in_w; // src.row(sy-1);
            const float* S1 = src + sy * in_w;       // src.row(sy);
            const float* S2 = src + (sy + 1) * in_w; // src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w; // src.row(sy+2);

            const float* alphap = alpha;
            float* rows0p = rows0;
            float* rows1p = rows1;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows0p[dx] = S0p[-1] * a0 + S0p[0] * a1 + S0p[1] * a2 + S0p[2] * a3;
                rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }

        prev_sy1 = sy;

        // vresize: blend the four hresize'd rows with this row's beta weights
        float b0 = beta[0];
        float b1 = beta[1];
        float b2 = beta[2];
        float b3 = beta[3];

        float* rows0p = rows0;
        float* rows1p = rows1;
        float* rows2p = rows2;
        float* rows3p = rows3;
        float* Dp = dst + dy * out_w; // dst.row(dy);
        for (int dx = 0; dx < w; dx++)
        {
            *Dp++ = *rows0p++ * b0 + *rows1p++ * b1 + *rows2p++ * b2 + *rows3p++ * b3;
        }

        beta += 4;
    }

    sys_free(rowsbuf0);
    sys_free(rowsbuf1);
    sys_free(rowsbuf2);
    sys_free(rowsbuf3);
}

/* Entry point: resize input_tensor (NCHW; dims[1..3] = C,H,W) into
 * output_tensor according to interp_param.
 *   resize_type 1 = nearest, 2 = bilinear, 3 = bicubic.
 * If the requested output size is 0, it is derived from the scale factors.
 * Channels are processed independently and in parallel via OpenMP.
 * Returns 0 in all paths. */
int interp_run(struct tensor* output_tensor, struct tensor* input_tensor, struct interp_param* interp_param, int num_thread)
{
    int resize_type = interp_param->resize_type;
    int out_w = interp_param->output_width;
    int out_h = interp_param->output_height;
    float width_scale = interp_param->width_scale;
    float height_scale = interp_param->height_scale;

    int in_c = input_tensor->dims[1];
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    float* data = ( float* )input_tensor->data;
    float* out_data = ( float* )output_tensor->data;

    if (out_h == 0 || out_w == 0)
    {
        /* derive output size from scales (truncating float->int conversion) */
        out_h = in_h * height_scale;
        out_w = in_w * width_scale;
    }
    if (out_h == in_h && out_w == in_w)
    {
        /* NOTE(review): this only reassigns the local pointer; nothing is
         * copied into output_tensor->data. Presumably the framework aliases
         * the buffers for the identity case -- confirm against callers. */
        out_data = data;
        return 0;
    }

    int out_channel_size = out_h * out_w;
    int in_channel_size = in_h * in_w;

    if (input_tensor->dim_num == 1)
    {
        /* 1-D input: broadcast each scalar data[q] over a full output plane */
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < input_tensor->dims[0]; ++q)
        {
            for (int i = 0; i < out_h * out_w; i++)
            {
                out_data[q * out_h * out_w + i] = data[q];
            }
        }
        return 0;
    }

    if (resize_type == 1) // nearest
    {
#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; q++)
        {
            for (int y = 0; y < out_h; ++y)
            {
                /* clamp to the last source row/col to stay in bounds */
                const int in_y = MIN(( int )(y / height_scale), (in_h - 1));
                for (int x = 0; x < out_w; ++x)
                {
                    const int in_x = MIN(( int )(x / width_scale), (in_w - 1));
                    out_data[out_w * y + x + out_w * out_h * q] = data[in_y * in_w + in_x + q * in_w * in_h];
                }
            }
        }
    }
    else if (resize_type == 2) // bilinear
    {
        /* one arena for the two index tables and two weight tables */
        int* buf = ( int* )sys_malloc((out_w + out_h + out_w * 2 + out_h * 2) * sizeof(int));

        int* xofs = buf;          // new int[ow];
        int* yofs = buf + out_w;  // new int[oh];

        float* alpha = ( float* )(buf + out_w + out_h);             // new float[ow * 2];
        float* beta = ( float* )(buf + out_w + out_h + out_w * 2);  // new float[oh * 2];

        linear_coeffs(in_w, out_w, xofs, alpha);
        linear_coeffs(in_h, out_h, yofs, beta);

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; ++q)
        {
            resize_bilinear_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs, out_h,
                                  out_w, in_h, in_w);
        }

        sys_free(buf);
    }
    else if (resize_type == 3) // bicubic
    {
        /* same layout as bilinear but 4 weights per output column/row */
        int* buf = ( int* )sys_malloc((out_w + out_h + out_w * 4 + out_h * 4) * sizeof(int));

        int* xofs = buf;          // new int[ow];
        int* yofs = buf + out_w;  // new int[oh];

        float* alpha = ( float* )(buf + out_w + out_h);             // new float[ow * 4];
        float* beta = ( float* )(buf + out_w + out_h + out_w * 4);  // new float[oh * 4];

        cubic_coeffs(in_w, out_w, xofs, alpha);
        cubic_coeffs(in_h, out_h, yofs, beta);

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < in_c; q++)
        {
            resize_bicubic_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs, out_h,
                                 out_w, in_h, in_w);
        }

        sys_free(buf);
        return 0;
    }

    return 0;
}
GB_binop__isle_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// It instantiates the generic GraphBLAS templates (via #include of *_template.c
// files) for the ISLE ("is less-than-or-equal") operator on int8_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__isle_int8
// A.*B function (eWiseMult):       GB_AemultB__isle_int8
// A*D function (colscale):         GB_AxD__isle_int8
// D*A function (rowscale):         GB_DxB__isle_int8
// C+=B function (dense accum):     GB_Cdense_accumB__isle_int8
// C+=b function (dense accum):     GB_Cdense_accumb__isle_int8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__isle_int8
// C=scalar+B                       GB_bind1st__isle_int8
// C=scalar+B'                      GB_bind1st_tran__isle_int8
// C=A+scalar                       GB_bind2nd__isle_int8
// C=A'+scalar                      GB_bind2nd_tran__isle_int8

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x <= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// NOTE: "(none)" is a generator placeholder, not a valid expression;
// presumably the templates never expand it for this operator -- confirm there.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_INT8 || GxB_NO_ISLE_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISLE is none of those, so this variant is compiled out (#if 0).

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isle_int8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; kept as emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// frees the ek_slice workspaces allocated by the add/emult templates below
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__isle_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces; allocated inside the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isle_int8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isle_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isle_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = (x <= aij) ; \
}

GrB_Info GB_bind1st_tran__isle_int8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code after this point
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int8_t aij = Ax [pA] ; \
    Cx [pC] = (aij <= y) ; \
}

GrB_Info GB_bind2nd_tran__isle_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_bitmap_assign_M_sub_template.c
//------------------------------------------------------------------------------
// GB_bitmap_assign_M_sub_template: traverse M for GB_SUBASSIGN
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// M is sparse or hypersparse, not bitmap or full.  C(I,J)<M>= ... is being
// computed (or !M), and all entries in M are traversed.  For a given entry
// M(iM,jM) in the mask, at location pM.  The entry C(iC,jC) is at location pC,
// where iC = I [iM] and jC = J [jM].  The matrix C is bitmap or full.

// C is bitmap/full.  M is sparse/hyper, and can be jumbled.

// Template fragment: included into a method that defines GB_MASK_WORK,
// GB_get_pA, GB_ijlist, GB_mcast, GBH, and the M_* / slice / I,J variables.

{
    int tid ;
    // one task per slice of M's entries; each task accumulates its own
    // task_cnvals, folded into cnvals via the OpenMP reduction
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over M (:,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(:,k) for this task
            //------------------------------------------------------------------

            int64_t jM = GBH (Mh, k) ;
            int64_t pM_start, pM_end ;
            GB_get_pA (&pM_start, &pM_end, tid, k,
                kfirst, klast, pstart_Mslice, Mp, mvlen) ;

            //------------------------------------------------------------------
            // traverse over M(:,jM), the kth vector of M
            //------------------------------------------------------------------

            // for subassign, M has same size as C(I,J) and A.
            int64_t jC = GB_ijlist (J, jM, Jkind, Jcolon) ;
            int64_t pC0 = jC * cvlen ;  // base offset of C's column jC

            for (int64_t pM = pM_start ; pM < pM_end ; pM++)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    // map mask row iM to C row iC, then to the linear index pC
                    int64_t iM = Mi [pM] ;
                    int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                    int64_t pC = iC + pC0 ;
                    GB_MASK_WORK (pC) ;         // operate on Cx [pC]
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
CH4-15step.c
#include <math.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #if defined(BL_FORT_USE_UPPERCASE) #define CKINDX CKINDX #define CKINIT CKINIT #define CKFINALIZE CKFINALIZE #define CKXNUM CKXNUM #define CKSYME CKSYME #define CKSYMS CKSYMS #define CKRP CKRP #define CKPX CKPX #define CKPY CKPY #define CKPC CKPC #define CKRHOX CKRHOX #define CKRHOY CKRHOY #define CKRHOC CKRHOC #define CKWT CKWT #define CKAWT CKAWT #define CKMMWY CKMMWY #define CKMMWX CKMMWX #define CKMMWC CKMMWC #define CKYTX CKYTX #define CKYTCP CKYTCP #define CKYTCR CKYTCR #define CKXTY CKXTY #define CKXTCP CKXTCP #define CKXTCR CKXTCR #define CKCTX CKCTX #define CKCTY CKCTY #define CKCPOR CKCPOR #define CKHORT CKHORT #define CKSOR CKSOR #define CKCVML CKCVML #define CKCPML CKCPML #define CKUML CKUML #define CKHML CKHML #define CKGML CKGML #define CKAML CKAML #define CKSML CKSML #define CKCVMS CKCVMS #define CKCPMS CKCPMS #define CKUMS CKUMS #define CKHMS CKHMS #define CKGMS CKGMS #define CKAMS CKAMS #define CKSMS CKSMS #define CKCPBL CKCPBL #define CKCPBS CKCPBS #define CKCVBL CKCVBL #define CKCVBS CKCVBS #define CKHBML CKHBML #define CKHBMS CKHBMS #define CKUBML CKUBML #define CKUBMS CKUBMS #define CKSBML CKSBML #define CKSBMS CKSBMS #define CKGBML CKGBML #define CKGBMS CKGBMS #define CKABML CKABML #define CKABMS CKABMS /* #define CKWC CKWC */ #define CKWYP CKWYP #define CKWXP CKWXP #define CKWYR CKWYR #define CKWXR CKWXR #define CKQC CKQC #define CKKFKR CKKFKR #define CKQYP CKQYP #define CKQXP CKQXP #define CKQYR CKQYR #define CKQXR CKQXR #define CKNU CKNU #define CKNCF CKNCF #define CKABE CKABE #define CKEQC CKEQC #define CKEQYP CKEQYP #define CKEQXP CKEQXP #define CKEQYR CKEQYR #define CKEQXR CKEQXR #define DWDOT DWDOT #define VCKHMS VCKHMS #define VCKPY VCKPY #define VCKWYR VCKWYR #define VCKYTX VCKYTX #define GET_T_GIVEN_EY GET_T_GIVEN_EY #define GET_T_GIVEN_HY GET_T_GIVEN_HY #define GET_REACTION_MAP GET_REACTION_MAP #define GET_CRITPARAMS GET_CRITPARAMS #elif 
defined(BL_FORT_USE_LOWERCASE) #define CKINDX ckindx #define CKINIT ckinit #define CKFINALIZE ckfinalize #define CKXNUM ckxnum #define CKSYME cksyme #define CKSYMS cksyms #define CKRP ckrp #define CKPX ckpx #define CKPY ckpy #define CKPC ckpc #define CKRHOX ckrhox #define CKRHOY ckrhoy #define CKRHOC ckrhoc #define CKWT ckwt #define CKAWT ckawt #define CKMMWY ckmmwy #define CKMMWX ckmmwx #define CKMMWC ckmmwc #define CKYTX ckytx #define CKYTCP ckytcp #define CKYTCR ckytcr #define CKXTY ckxty #define CKXTCP ckxtcp #define CKXTCR ckxtcr #define CKCTX ckctx #define CKCTY ckcty #define CKCPOR ckcpor #define CKHORT ckhort #define CKSOR cksor #define CKCVML ckcvml #define CKCPML ckcpml #define CKUML ckuml #define CKHML ckhml #define CKGML ckgml #define CKAML ckaml #define CKSML cksml #define CKCVMS ckcvms #define CKCPMS ckcpms #define CKUMS ckums #define CKHMS ckhms #define CKGMS ckgms #define CKAMS ckams #define CKSMS cksms #define CKCPBL ckcpbl #define CKCPBS ckcpbs #define CKCVBL ckcvbl #define CKCVBS ckcvbs #define CKHBML ckhbml #define CKHBMS ckhbms #define CKUBML ckubml #define CKUBMS ckubms #define CKSBML cksbml #define CKSBMS cksbms #define CKGBML ckgbml #define CKGBMS ckgbms #define CKABML ckabml #define CKABMS ckabms /* #define CKWC ckwc */ #define CKWYP ckwyp #define CKWXP ckwxp #define CKWYR ckwyr #define CKWXR ckwxr #define CKQC ckqc #define CKKFKR ckkfkr #define CKQYP ckqyp #define CKQXP ckqxp #define CKQYR ckqyr #define CKQXR ckqxr #define CKNU cknu #define CKNCF ckncf #define CKABE ckabe #define CKEQC ckeqc #define CKEQYP ckeqyp #define CKEQXP ckeqxp #define CKEQYR ckeqyr #define CKEQXR ckeqxr #define DWDOT dwdot #define VCKHMS vckhms #define VCKPY vckpy #define VCKWYR vckwyr #define VCKYTX vckytx #define GET_T_GIVEN_EY get_t_given_ey #define GET_T_GIVEN_HY get_t_given_hy #define GET_REACTION_MAP get_reaction_map #define GET_CRITPARAMS get_critparams #elif defined(BL_FORT_USE_UNDERSCORE) #define CKINDX ckindx_ #define CKINIT ckinit_ #define CKFINALIZE 
ckfinalize_ #define CKXNUM ckxnum_ #define CKSYME cksyme_ #define CKSYMS cksyms_ #define CKRP ckrp_ #define CKPX ckpx_ #define CKPY ckpy_ #define CKPC ckpc_ #define CKRHOX ckrhox_ #define CKRHOY ckrhoy_ #define CKRHOC ckrhoc_ #define CKWT ckwt_ #define CKAWT ckawt_ #define CKMMWY ckmmwy_ #define CKMMWX ckmmwx_ #define CKMMWC ckmmwc_ #define CKYTX ckytx_ #define CKYTCP ckytcp_ #define CKYTCR ckytcr_ #define CKXTY ckxty_ #define CKXTCP ckxtcp_ #define CKXTCR ckxtcr_ #define CKCTX ckctx_ #define CKCTY ckcty_ #define CKCPOR ckcpor_ #define CKHORT ckhort_ #define CKSOR cksor_ #define CKCVML ckcvml_ #define CKCPML ckcpml_ #define CKUML ckuml_ #define CKHML ckhml_ #define CKGML ckgml_ #define CKAML ckaml_ #define CKSML cksml_ #define CKCVMS ckcvms_ #define CKCPMS ckcpms_ #define CKUMS ckums_ #define CKHMS ckhms_ #define CKGMS ckgms_ #define CKAMS ckams_ #define CKSMS cksms_ #define CKCPBL ckcpbl_ #define CKCPBS ckcpbs_ #define CKCVBL ckcvbl_ #define CKCVBS ckcvbs_ #define CKHBML ckhbml_ #define CKHBMS ckhbms_ #define CKUBML ckubml_ #define CKUBMS ckubms_ #define CKSBML cksbml_ #define CKSBMS cksbms_ #define CKGBML ckgbml_ #define CKGBMS ckgbms_ #define CKABML ckabml_ #define CKABMS ckabms_ /* #define CKWC ckwc_ */ #define CKWYP ckwyp_ #define CKWXP ckwxp_ #define CKWYR ckwyr_ #define CKWXR ckwxr_ #define CKQC ckqc_ #define CKKFKR ckkfkr_ #define CKQYP ckqyp_ #define CKQXP ckqxp_ #define CKQYR ckqyr_ #define CKQXR ckqxr_ #define CKNU cknu_ #define CKNCF ckncf_ #define CKABE ckabe_ #define CKEQC ckeqc_ #define CKEQYP ckeqyp_ #define CKEQXP ckeqxp_ #define CKEQYR ckeqyr_ #define CKEQXR ckeqxr_ #define DWDOT dwdot_ #define VCKHMS vckhms_ #define VCKPY vckpy_ #define VCKWYR vckwyr_ #define VCKYTX vckytx_ #define GET_T_GIVEN_EY get_t_given_ey_ #define GET_T_GIVEN_HY get_t_given_hy_ #define GET_REACTION_MAP get_reaction_map_ #define GET_CRITPARAMS get_critparams_ #endif /*function declarations */ #if defined(BL_FORT_USE_UPPERCASE) #define egtransetEPS EGTRANSETEPS #elif 
defined(BL_FORT_USE_LOWERCASE) #define egtransetEPS egtranseteps #elif defined(BL_FORT_USE_UNDERSCORE) #define egtransetEPS egtranseteps_ #endif void egtransetEPS(double * EPS); #if defined(BL_FORT_USE_UPPERCASE) #define egtransetSIG EGTRANSETSIG #elif defined(BL_FORT_USE_LOWERCASE) #define egtransetSIG egtransetsig #elif defined(BL_FORT_USE_UNDERSCORE) #define egtransetSIG egtransetsig_ #endif void egtransetSIG(double* SIG); void atomicWeight(double * restrict awt); void molecularWeight(double * restrict wt); void gibbs(double * restrict species, double * restrict tc); void helmholtz(double * restrict species, double * restrict tc); void speciesInternalEnergy(double * restrict species, double * restrict tc); void speciesEnthalpy(double * restrict species, double * restrict tc); void speciesEntropy(double * restrict species, double * restrict tc); void cp_R(double * restrict species, double * restrict tc); void cv_R(double * restrict species, double * restrict tc); void equilibriumConstants(double * restrict kc, double * restrict g_RT, double T); void productionRate(double * restrict wdot, double * restrict sc, double T); void comp_k_f(double * restrict tc, double invT, double * restrict k_f); void comp_Kc(double * restrict tc, double invT, double * restrict Kc); void comp_qfqr(double * restrict q_f, double * restrict q_r, double * restrict sc, double * restrict tc, double invT); void progressRate(double * restrict qdot, double * restrict speciesConc, double T); void progressRateFR(double * restrict q_f, double * restrict q_r, double * restrict speciesConc, double T); void CKINIT(); void CKFINALIZE(); void CKINDX(int * iwrk, double * restrict rwrk, int * mm, int * kk, int * ii, int * nfit ); void CKXNUM(char * line, int * nexp, int * lout, int * nval, double * restrict rval, int * kerr, int lenline); void CKSNUM(char * line, int * nexp, int * lout, char * kray, int * nn, int * knum, int * nval, double * restrict rval, int * kerr, int lenline, int lenkray); void 
CKSYME(int * kname, int * lenkname); void CKSYMS(int * kname, int * lenkname); void CKRP(int * ickwrk, double * restrict rckwrk, double * restrict ru, double * restrict ruc, double * restrict pa); void CKPX(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict P); void CKPY(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P); void CKPC(double * restrict rho, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict P); void CKRHOX(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict rho); void CKRHOY(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict rho); void CKRHOC(double * restrict P, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict rho); void CKWT(int * iwrk, double * restrict rwrk, double * restrict wt); void CKAWT(int * iwrk, double * restrict rwrk, double * restrict awt); void CKMMWY(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wtm); void CKMMWX(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wtm); void CKMMWC(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict wtm); void CKYTX(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x); void CKYTCP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c); void CKYTCR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c); void CKXTY(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict y); void CKXTCP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict 
c);
/* ------------------------------------------------------------------------- */
/* CHEMKIN-style C interface: forward declarations for the state-conversion, */
/* thermodynamic, production-rate and equilibrium routines defined below.    */
/* NOTE(review): this file appears to be machine-generated from a mechanism  */
/* description -- prefer fixing the generator over hand-editing.             */
/* ------------------------------------------------------------------------- */
void CKXTCR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c);
void CKCTX(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict x);
void CKCTY(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict y);
void CKCPOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpor);
void CKHORT(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hort);
void CKSOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sor);
void CKCVML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml);
void CKCPML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml);
void CKUML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml);
void CKHML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml);
void CKGML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gml);
void CKAML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict aml);
void CKSML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sml);
void CKCVMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms);
void CKCPMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms);
void CKUMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);
void CKHMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);
void CKGMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gms);
void CKAMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ams);
void CKSMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sms);
void CKCPBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl);
void CKCPBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs);
void CKCVBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl);
void CKCVBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs);
void CKHBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict hbml);
void CKHBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict hbms);
void CKUBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict ubml);
void CKUBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict ubms);
void CKSBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict sbml);
void CKSBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict sbms);
void CKGBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict gbml);
void CKGBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict gbms);
void CKABML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict abml);
void CKABMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict abms);
/* void CKWC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict wdot); */
void CKWYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKWXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot);
void CKQC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKKFKR(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict q_f, double * restrict q_r);
void CKQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot);
void CKNU(int * kdim, int * iwrk, double * restrict rwrk, int * nuki);
void CKNCF(int * mdim, int * iwrk, double * restrict rwrk, int * ncf);
void CKABE(int * iwrk, double * restrict rwrk, double * restrict a, double * restrict b, double * restrict e );
void CKEQC(double * restrict T, double * restrict C , int * iwrk, double * restrict rwrk, double * restrict eqcon );
void CKEQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void CKEQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void CKEQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon);
void CKEQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon);
/* Jacobian / sensitivity helpers */
void DWDOT(double * restrict J, double * restrict sc, double * restrict T, int * consP);
void aJacobian(double * restrict J, double * restrict sc, double T, int consP);
void dcvpRdT(double * restrict species, double * restrict tc);
void GET_T_GIVEN_EY(double * restrict e, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int *ierr);
void GET_T_GIVEN_HY(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int *ierr);
void GET_REACTION_MAP(int * restrict rmap);
/*vector version */
void vproductionRate(int npt, double * restrict wdot, double * restrict c, double * restrict T);
void VCKHMS(int * restrict np, double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums);
void VCKPY(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P);
void VCKWYR(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * restrict iwrk, double * restrict rwrk, double * restrict wdot);
void VCKYTX(int * restrict np, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x);
void vcomp_k_f(int npt, double * restrict k_f_s, double * restrict tc, double * restrict invT);
void vcomp_gibbs(int npt, double * restrict g_RT, double * restrict tc);
void vcomp_Kc(int npt, double * restrict Kc_s, double * restrict g_RT, double * restrict invT);
void GET_CRITPARAMS(double * restrict Tci, double * restrict ai, double * restrict bi, double * restrict acentric_i);
void vcomp_wdot(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc, double * restrict k_f_s, double * restrict Kc_s, double * restrict tc, double * restrict invT, double * restrict T);
/* Inverse molecular weights */
/* imw[k] = 1/W_k for the 19 species (cf. *kk = 19 reported by CKINDX below);
   used throughout to convert mass fractions to molar quantities
   (see CKPY, CKMMWY, CKYTX, ...).  Species order is fixed by the comments. */
static const double imw[19] = {
    1.0 / 2.015940,  /*H2 */
    1.0 / 1.007970,  /*H */
    1.0 / 31.998800,  /*O2 */
    1.0 / 17.007370,  /*OH */
    1.0 / 18.015340,  /*H2O */
    1.0 / 33.006770,  /*HO2 */
    1.0 / 34.014740,  /*H2O2 */
    1.0 / 15.035060,  /*CH3 */
    1.0 / 16.043030,  /*CH4 */
    1.0 / 28.010550,  /*CO */
    1.0 / 44.009950,  /*CO2 */
    1.0 / 30.026490,  /*CH2O */
    1.0 / 26.038240,  /*C2H2 */
    1.0 / 28.054180,  /*C2H4 */
    1.0 / 30.070120,  /*C2H6 */
    1.0 / 17.030610,  /*NH3 */
    1.0 / 30.006100,  /*NO */
    1.0 / 27.025820,  /*HCN */
    1.0 / 28.013400}; /*N2 */

/* Per-reaction rate-parameter storage (working copies and *_DEF defaults).
   This mechanism has 0 reactions (CKINDX sets *ii = 0), so every array below
   is declared with length 0 -- a GNU C extension, not ISO C -- and all loops
   over reactions in this file are no-ops. */
static double fwd_A[0], fwd_beta[0], fwd_Ea[0];
static double low_A[0], low_beta[0], low_Ea[0];
static double rev_A[0], rev_beta[0], rev_Ea[0];
static double troe_a[0],troe_Ts[0], troe_Tss[0], troe_Tsss[0];
static double sri_a[0], sri_b[0], sri_c[0], sri_d[0], sri_e[0];
static double activation_units[0], prefactor_units[0], phase_units[0];
static int is_PD[0], troe_len[0], sri_len[0], nTB[0], *TBid[0];
static double *TB[0];
static double fwd_A_DEF[0], fwd_beta_DEF[0], fwd_Ea_DEF[0];
static double low_A_DEF[0], low_beta_DEF[0], low_Ea_DEF[0];
static double rev_A_DEF[0], rev_beta_DEF[0], rev_Ea_DEF[0];
static double troe_a_DEF[0],troe_Ts_DEF[0], troe_Tss_DEF[0], troe_Tsss_DEF[0];
static double sri_a_DEF[0], sri_b_DEF[0], sri_c_DEF[0], sri_d_DEF[0], sri_e_DEF[0];
static double activation_units_DEF[0], prefactor_units_DEF[0], phase_units_DEF[0];
static int is_PD_DEF[0], troe_len_DEF[0], sri_len_DEF[0], nTB_DEF[0], *TBid_DEF[0];
static double *TB_DEF[0];
static int rxn_map[0] = {};

/* Copy the mechanism's reaction map into rmap; a no-op here (0 reactions). */
void GET_REACTION_MAP(int *rmap)
{
    for (int i=0; i<0; ++i)
    {
        rmap[i] = rxn_map[i];
    }
}

#include <ReactionData.H>
/* Return a pointer to the requested rate parameter of one reaction, either
   the live value or (get_default) its *_DEF default.  Aborts on any invalid
   id.  NOTE(review): with 0 reactions the guard (reaction_id<0 ||
   reaction_id>=0) is tautologically true, so this function always aborts. */
double* GetParamPtr(int reaction_id,
                    REACTION_PARAMETER param_id,
                    int species_id,
                    int get_default)
{
  double* ret = 0;
  if (reaction_id<0 || reaction_id>=0) {
    printf("Bad reaction id = %d",reaction_id);
    abort();
  };
  int mrid = rxn_map[reaction_id];

  if (param_id == THIRD_BODY) {
    if (species_id<0 || species_id>=19) {
      printf("GetParamPtr: Bad species id = %d",species_id);
      abort();
    }
    if (get_default) {
      for (int i=0; i<nTB_DEF[mrid]; ++i) {
        if (species_id == TBid_DEF[mrid][i]) {
          ret = &(TB_DEF[mrid][i]);
        }
      }
    }
    else {
      for (int i=0; i<nTB[mrid]; ++i) {
        if (species_id == TBid[mrid][i]) {
          ret = &(TB[mrid][i]);
        }
      }
    }
    if (ret == 0) {
      printf("GetParamPtr: No TB for reaction id = %d",reaction_id);
      abort();
    }
  }
  else {
    if (     param_id == FWD_A)     {ret = (get_default ? &(fwd_A_DEF[mrid]) : &(fwd_A[mrid]));}
    else if (param_id == FWD_BETA)  {ret = (get_default ? &(fwd_beta_DEF[mrid]) : &(fwd_beta[mrid]));}
    else if (param_id == FWD_EA)    {ret = (get_default ? &(fwd_Ea_DEF[mrid]) : &(fwd_Ea[mrid]));}
    else if (param_id == LOW_A)     {ret = (get_default ? &(low_A_DEF[mrid]) : &(low_A[mrid]));}
    else if (param_id == LOW_BETA)  {ret = (get_default ? &(low_beta_DEF[mrid]) : &(low_beta[mrid]));}
    else if (param_id == LOW_EA)    {ret = (get_default ? &(low_Ea_DEF[mrid]) : &(low_Ea[mrid]));}
    else if (param_id == REV_A)     {ret = (get_default ? &(rev_A_DEF[mrid]) : &(rev_A[mrid]));}
    else if (param_id == REV_BETA)  {ret = (get_default ? &(rev_beta_DEF[mrid]) : &(rev_beta[mrid]));}
    else if (param_id == REV_EA)    {ret = (get_default ? &(rev_Ea_DEF[mrid]) : &(rev_Ea[mrid]));}
    else if (param_id == TROE_A)    {ret = (get_default ? &(troe_a_DEF[mrid]) : &(troe_a[mrid]));}
    else if (param_id == TROE_TS)   {ret = (get_default ? &(troe_Ts_DEF[mrid]) : &(troe_Ts[mrid]));}
    else if (param_id == TROE_TSS)  {ret = (get_default ? &(troe_Tss_DEF[mrid]) : &(troe_Tss[mrid]));}
    else if (param_id == TROE_TSSS) {ret = (get_default ? &(troe_Tsss_DEF[mrid]) : &(troe_Tsss[mrid]));}
    else if (param_id == SRI_A)     {ret = (get_default ? &(sri_a_DEF[mrid]) : &(sri_a[mrid]));}
    else if (param_id == SRI_B)     {ret = (get_default ? &(sri_b_DEF[mrid]) : &(sri_b[mrid]));}
    else if (param_id == SRI_C)     {ret = (get_default ? &(sri_c_DEF[mrid]) : &(sri_c[mrid]));}
    else if (param_id == SRI_D)     {ret = (get_default ? &(sri_d_DEF[mrid]) : &(sri_d[mrid]));}
    else if (param_id == SRI_E)     {ret = (get_default ?
&(sri_e_DEF[mrid]) : &(sri_e[mrid]));}
    else {
      printf("GetParamPtr: Unknown parameter id");
      abort();
    }
  }
  return ret;
}

/* Restore all working rate-parameter arrays from their *_DEF defaults,
   reallocating the third-body tables.  A no-op here (loop bound is 0). */
void ResetAllParametersToDefault()
{
    for (int i=0; i<0; i++) {
        if (nTB[i] != 0) {
            nTB[i] = 0;
            free(TB[i]);
            free(TBid[i]);
        }

        fwd_A[i]    = fwd_A_DEF[i];
        fwd_beta[i] = fwd_beta_DEF[i];
        fwd_Ea[i]   = fwd_Ea_DEF[i];

        low_A[i]    = low_A_DEF[i];
        low_beta[i] = low_beta_DEF[i];
        low_Ea[i]   = low_Ea_DEF[i];

        rev_A[i]    = rev_A_DEF[i];
        rev_beta[i] = rev_beta_DEF[i];
        rev_Ea[i]   = rev_Ea_DEF[i];

        troe_a[i]    = troe_a_DEF[i];
        troe_Ts[i]   = troe_Ts_DEF[i];
        troe_Tss[i]  = troe_Tss_DEF[i];
        troe_Tsss[i] = troe_Tsss_DEF[i];

        sri_a[i] = sri_a_DEF[i];
        sri_b[i] = sri_b_DEF[i];
        sri_c[i] = sri_c_DEF[i];
        sri_d[i] = sri_d_DEF[i];
        sri_e[i] = sri_e_DEF[i];

        is_PD[i]    = is_PD_DEF[i];
        troe_len[i] = troe_len_DEF[i];
        sri_len[i]  = sri_len_DEF[i];

        activation_units[i] = activation_units_DEF[i];
        prefactor_units[i]  = prefactor_units_DEF[i];
        phase_units[i]      = phase_units_DEF[i];

        nTB[i] = nTB_DEF[i];
        if (nTB[i] != 0) {
           TB[i] = (double *) malloc(sizeof(double) * nTB[i]);
           TBid[i] = (int *) malloc(sizeof(int) * nTB[i]);
           for (int j=0; j<nTB[i]; j++) {
             TB[i][j] = TB_DEF[i][j];
             TBid[i][j] = TBid_DEF[i][j];
           }
        }
    }
}

/* Snapshot the current working parameters into the *_DEF defaults
   (inverse of ResetAllParametersToDefault).  A no-op here (loop bound 0). */
void SetAllDefaults()
{
    for (int i=0; i<0; i++) {
        if (nTB_DEF[i] != 0) {
            nTB_DEF[i] = 0;
            free(TB_DEF[i]);
            free(TBid_DEF[i]);
        }

        fwd_A_DEF[i]    = fwd_A[i];
        fwd_beta_DEF[i] = fwd_beta[i];
        fwd_Ea_DEF[i]   = fwd_Ea[i];

        low_A_DEF[i]    = low_A[i];
        low_beta_DEF[i] = low_beta[i];
        low_Ea_DEF[i]   = low_Ea[i];

        rev_A_DEF[i]    = rev_A[i];
        rev_beta_DEF[i] = rev_beta[i];
        rev_Ea_DEF[i]   = rev_Ea[i];

        troe_a_DEF[i]    = troe_a[i];
        troe_Ts_DEF[i]   = troe_Ts[i];
        troe_Tss_DEF[i]  = troe_Tss[i];
        troe_Tsss_DEF[i] = troe_Tsss[i];

        sri_a_DEF[i] = sri_a[i];
        sri_b_DEF[i] = sri_b[i];
        sri_c_DEF[i] = sri_c[i];
        sri_d_DEF[i] = sri_d[i];
        sri_e_DEF[i] = sri_e[i];

        is_PD_DEF[i]    = is_PD[i];
        troe_len_DEF[i] = troe_len[i];
        sri_len_DEF[i]  = sri_len[i];

        activation_units_DEF[i] = activation_units[i];
        prefactor_units_DEF[i]  = prefactor_units[i];
        phase_units_DEF[i]
    = phase_units[i];

        nTB_DEF[i] = nTB[i];
        if (nTB_DEF[i] != 0) {
           TB_DEF[i] = (double *) malloc(sizeof(double) * nTB_DEF[i]);
           TBid_DEF[i] = (int *) malloc(sizeof(int) * nTB_DEF[i]);
           for (int j=0; j<nTB_DEF[i]; j++) {
             TB_DEF[i][j] = TB[i][j];
             TBid_DEF[i][j] = TBid[i][j];
           }
        }
    }
}

/* Finalizes parameter database */
/* Frees all heap-held third-body tables and zeroes their counters. */
void CKFINALIZE()
{
  for (int i=0; i<0; ++i) {
    free(TB[i]); TB[i] = 0;
    free(TBid[i]); TBid[i] = 0;
    nTB[i] = 0;

    free(TB_DEF[i]); TB_DEF[i] = 0;
    free(TBid_DEF[i]); TBid_DEF[i] = 0;
    nTB_DEF[i] = 0;
  }
}

/* Initializes parameter database */
void CKINIT()
{
    SetAllDefaults();
}

/*A few mechanism parameters */
/* Outputs: *mm elements (4), *kk species (19), *ii reactions (0), *nfit. */
void CKINDX(int * iwrk, double * restrict rwrk, int * mm, int * kk, int * ii, int * nfit)
{
    *mm = 4;
    *kk = 19;
    *ii = 0;
    *nfit = -1; /*Why do you need this anyway ? */
}

/* ckxnum... for parsing strings */
/* Parse up to *nexp whitespace-separated numbers from line (comments start
   at '!').  Sets *nval to the count read and *kerr on error.
   NOTE(review): cstr is a fixed 1000-byte buffer; a lenline >= 1000 would
   overflow it -- confirm callers bound lenline. */
void CKXNUM(char * line, int * nexp, int * lout, int * nval, double * restrict rval, int * kerr, int lenline )
{
    int n,i; /*Loop Counters */
    char cstr[1000];
    char *saveptr;
    char *p; /*String Tokens */
    /* Strip Comments */
    for (i=0; i<lenline; ++i) {
        if (line[i]=='!') {
            break;
        }
        cstr[i] = line[i];
    }
    cstr[i] = '\0';

    p = strtok_r(cstr," ", &saveptr);
    if (!p) {
        *nval = 0;
        *kerr = 1;
        return;
    }
    for (n=0; n<*nexp; ++n) {
        rval[n] = atof(p);
        p = strtok_r(NULL, " ", &saveptr);
        if (!p) break;
    }
    *nval = n+1;
    if (*nval < *nexp) *kerr = 1;
    return;
}

/* cksnum... for parsing strings */
void CKSNUM(char * line, int * nexp, int * lout, char * kray, int * nn, int * knum, int * nval, double * restrict rval, int * kerr, int lenline, int lenkray)
{
    /*Not done yet ...
*/
}

/* Returns the char strings of element names */
/* kname is a row-major (element x lenkname) table of chars stored in ints,
   space-padded to lenkname columns. */
void CKSYME(int * kname, int * plenkname )
{
    int i; /*Loop Counter */
    int lenkname = *plenkname;
    /*clear kname */
    for (i=0; i<lenkname*4; i++) {
        kname[i] = ' ';
    }

    /* O */
    kname[ 0*lenkname + 0 ] = 'O';
    kname[ 0*lenkname + 1 ] = ' ';

    /* H */
    kname[ 1*lenkname + 0 ] = 'H';
    kname[ 1*lenkname + 1 ] = ' ';

    /* C */
    kname[ 2*lenkname + 0 ] = 'C';
    kname[ 2*lenkname + 1 ] = ' ';

    /* N */
    kname[ 3*lenkname + 0 ] = 'N';
    kname[ 3*lenkname + 1 ] = ' ';
}

/* Returns the char strings of species names */
/* Same layout as CKSYME, one row per species (19 rows). */
void CKSYMS(int * kname, int * plenkname )
{
    int i; /*Loop Counter */
    int lenkname = *plenkname;
    /*clear kname */
    for (i=0; i<lenkname*19; i++) {
        kname[i] = ' ';
    }

    /* H2 */
    kname[ 0*lenkname + 0 ] = 'H';
    kname[ 0*lenkname + 1 ] = '2';
    kname[ 0*lenkname + 2 ] = ' ';

    /* H */
    kname[ 1*lenkname + 0 ] = 'H';
    kname[ 1*lenkname + 1 ] = ' ';

    /* O2 */
    kname[ 2*lenkname + 0 ] = 'O';
    kname[ 2*lenkname + 1 ] = '2';
    kname[ 2*lenkname + 2 ] = ' ';

    /* OH */
    kname[ 3*lenkname + 0 ] = 'O';
    kname[ 3*lenkname + 1 ] = 'H';
    kname[ 3*lenkname + 2 ] = ' ';

    /* H2O */
    kname[ 4*lenkname + 0 ] = 'H';
    kname[ 4*lenkname + 1 ] = '2';
    kname[ 4*lenkname + 2 ] = 'O';
    kname[ 4*lenkname + 3 ] = ' ';

    /* HO2 */
    kname[ 5*lenkname + 0 ] = 'H';
    kname[ 5*lenkname + 1 ] = 'O';
    kname[ 5*lenkname + 2 ] = '2';
    kname[ 5*lenkname + 3 ] = ' ';

    /* H2O2 */
    kname[ 6*lenkname + 0 ] = 'H';
    kname[ 6*lenkname + 1 ] = '2';
    kname[ 6*lenkname + 2 ] = 'O';
    kname[ 6*lenkname + 3 ] = '2';
    kname[ 6*lenkname + 4 ] = ' ';

    /* CH3 */
    kname[ 7*lenkname + 0 ] = 'C';
    kname[ 7*lenkname + 1 ] = 'H';
    kname[ 7*lenkname + 2 ] = '3';
    kname[ 7*lenkname + 3 ] = ' ';

    /* CH4 */
    kname[ 8*lenkname + 0 ] = 'C';
    kname[ 8*lenkname + 1 ] = 'H';
    kname[ 8*lenkname + 2 ] = '4';
    kname[ 8*lenkname + 3 ] = ' ';

    /* CO */
    kname[ 9*lenkname + 0 ] = 'C';
    kname[ 9*lenkname + 1 ] = 'O';
    kname[ 9*lenkname + 2 ] = ' ';

    /* CO2 */
    kname[ 10*lenkname + 0 ] = 'C';
    kname[ 10*lenkname + 1 ] = 'O';
    kname[ 10*lenkname + 2 ] = '2';
    kname[ 10*lenkname + 3 ] = ' ';

    /* CH2O */
    kname[ 11*lenkname + 0 ] = 'C';
    kname[ 11*lenkname + 1 ] = 'H';
    kname[ 11*lenkname + 2 ] = '2';
    kname[ 11*lenkname + 3 ] = 'O';
    kname[ 11*lenkname + 4 ] = ' ';

    /* C2H2 */
    kname[ 12*lenkname + 0 ] = 'C';
    kname[ 12*lenkname + 1 ] = '2';
    kname[ 12*lenkname + 2 ] = 'H';
    kname[ 12*lenkname + 3 ] = '2';
    kname[ 12*lenkname + 4 ] = ' ';

    /* C2H4 */
    kname[ 13*lenkname + 0 ] = 'C';
    kname[ 13*lenkname + 1 ] = '2';
    kname[ 13*lenkname + 2 ] = 'H';
    kname[ 13*lenkname + 3 ] = '4';
    kname[ 13*lenkname + 4 ] = ' ';

    /* C2H6 */
    kname[ 14*lenkname + 0 ] = 'C';
    kname[ 14*lenkname + 1 ] = '2';
    kname[ 14*lenkname + 2 ] = 'H';
    kname[ 14*lenkname + 3 ] = '6';
    kname[ 14*lenkname + 4 ] = ' ';

    /* NH3 */
    kname[ 15*lenkname + 0 ] = 'N';
    kname[ 15*lenkname + 1 ] = 'H';
    kname[ 15*lenkname + 2 ] = '3';
    kname[ 15*lenkname + 3 ] = ' ';

    /* NO */
    kname[ 16*lenkname + 0 ] = 'N';
    kname[ 16*lenkname + 1 ] = 'O';
    kname[ 16*lenkname + 2 ] = ' ';

    /* HCN */
    kname[ 17*lenkname + 0 ] = 'H';
    kname[ 17*lenkname + 1 ] = 'C';
    kname[ 17*lenkname + 2 ] = 'N';
    kname[ 17*lenkname + 3 ] = ' ';

    /* N2 */
    kname[ 18*lenkname + 0 ] = 'N';
    kname[ 18*lenkname + 1 ] = '2';
    kname[ 18*lenkname + 2 ] = ' ';
}

/* Returns R, Rc, Patm */
/* Presumably CGS units (R in erg/(mol K), Patm in dyn/cm^2) -- the same
   8.31451e+07 constant is used as R throughout this file. */
void CKRP(int * ickwrk, double * restrict rckwrk, double * restrict ru, double * restrict ruc, double * restrict pa)
{
     *ru  = 8.31451e+07;
     *ruc = 1.98721558317399615845;
     *pa  = 1.01325e+06;
}

/*Compute P = rhoRT/W(x) */
void CKPX(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict P)
{
    double XW = 0;/* To hold mean molecular wt */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW
 += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    *P = *rho * 8.31451e+07 * (*T) / XW; /*P = rho*R*T/W */

    return;
}

/*Compute P = rhoRT/W(y) */
void CKPY(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P)
{
    double YOW = 0;/* for computing mean MW */
    YOW += y[0]*imw[0]; /*H2 */
    YOW += y[1]*imw[1]; /*H */
    YOW += y[2]*imw[2]; /*O2 */
    YOW += y[3]*imw[3]; /*OH */
    YOW += y[4]*imw[4]; /*H2O */
    YOW += y[5]*imw[5]; /*HO2 */
    YOW += y[6]*imw[6]; /*H2O2 */
    YOW += y[7]*imw[7]; /*CH3 */
    YOW += y[8]*imw[8]; /*CH4 */
    YOW += y[9]*imw[9]; /*CO */
    YOW += y[10]*imw[10]; /*CO2 */
    YOW += y[11]*imw[11]; /*CH2O */
    YOW += y[12]*imw[12]; /*C2H2 */
    YOW += y[13]*imw[13]; /*C2H4 */
    YOW += y[14]*imw[14]; /*C2H6 */
    YOW += y[15]*imw[15]; /*NH3 */
    YOW += y[16]*imw[16]; /*NO */
    YOW += y[17]*imw[17]; /*HCN */
    YOW += y[18]*imw[18]; /*N2 */
    *P = *rho * 8.31451e+07 * (*T) * YOW; /*P = rho*R*T/W */

    return;
}

/*Compute P = rhoRT/W(y) */
/* Vector version over *np points; arrays are species-major, i.e. y[n*np+i]
   is species n at point i.  Uses a C99 VLA YOW[*np] on the stack. */
void VCKPY(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict P)
{
    double YOW[*np];
    for (int i=0; i<(*np); i++) {
        YOW[i] = 0.0;
    }

    for (int n=0; n<19; n++) {
        for (int i=0; i<(*np); i++) {
            YOW[i] += y[n*(*np)+i] * imw[n];
        }
    }

    for (int i=0; i<(*np); i++) {
        P[i] = rho[i] * 8.31451e+07 * T[i] * YOW[i]; /*P = rho*R*T/W */
    }

    return;
}

/*Compute P = rhoRT/W(c) */
void CKPC(double * restrict rho, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict P)
{
    int id; /*loop counter */
    /*See Eq 5 in CK Manual */
    double W = 0;
    double sumC = 0;
    W += c[0]*2.015940; /*H2 */
    W += c[1]*1.007970; /*H */
    W += c[2]*31.998800; /*O2 */
    W += c[3]*17.007370; /*OH */
    W += c[4]*18.015340; /*H2O */
    W += c[5]*33.006770; /*HO2 */
    W +=
c[6]*34.014740; /*H2O2 */
    W += c[7]*15.035060; /*CH3 */
    W += c[8]*16.043030; /*CH4 */
    W += c[9]*28.010550; /*CO */
    W += c[10]*44.009950; /*CO2 */
    W += c[11]*30.026490; /*CH2O */
    W += c[12]*26.038240; /*C2H2 */
    W += c[13]*28.054180; /*C2H4 */
    W += c[14]*30.070120; /*C2H6 */
    W += c[15]*17.030610; /*NH3 */
    W += c[16]*30.006100; /*NO */
    W += c[17]*27.025820; /*HCN */
    W += c[18]*28.013400; /*N2 */

    for (id = 0; id < 19; ++id) {
        sumC += c[id];
    }
    *P = *rho * 8.31451e+07 * (*T) * sumC / W; /*P = rho*R*T/W */

    return;
}

/*Compute rho = PW(x)/RT */
void CKRHOX(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict rho)
{
    double XW = 0;/* To hold mean molecular wt */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    *rho = *P * XW / (8.31451e+07 * (*T)); /*rho = P*W/(R*T) */

    return;
}

/*Compute rho = P*W(y)/RT */
void CKRHOY(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict rho)
{
    double YOW = 0;
    double tmp[19];

    for (int i = 0; i < 19; i++)
    {
        tmp[i] = y[i]*imw[i];
    }
    for (int i = 0; i < 19; i++)
    {
        YOW += tmp[i];
    }

    *rho = *P / (8.31451e+07 * (*T) * YOW);/*rho = P*W/(R*T) */
    return;
}

/*Compute rho = P*W(c)/(R*T) */
void CKRHOC(double * restrict P, double * restrict T, double * restrict c, int * iwrk, double * restrict rwrk, double * restrict rho)
{
    int id; /*loop counter */
    /*See Eq 5 in CK Manual */
    double W = 0;
    double
 sumC = 0;
    W += c[0]*2.015940; /*H2 */
    W += c[1]*1.007970; /*H */
    W += c[2]*31.998800; /*O2 */
    W += c[3]*17.007370; /*OH */
    W += c[4]*18.015340; /*H2O */
    W += c[5]*33.006770; /*HO2 */
    W += c[6]*34.014740; /*H2O2 */
    W += c[7]*15.035060; /*CH3 */
    W += c[8]*16.043030; /*CH4 */
    W += c[9]*28.010550; /*CO */
    W += c[10]*44.009950; /*CO2 */
    W += c[11]*30.026490; /*CH2O */
    W += c[12]*26.038240; /*C2H2 */
    W += c[13]*28.054180; /*C2H4 */
    W += c[14]*30.070120; /*C2H6 */
    W += c[15]*17.030610; /*NH3 */
    W += c[16]*30.006100; /*NO */
    W += c[17]*27.025820; /*HCN */
    W += c[18]*28.013400; /*N2 */

    for (id = 0; id < 19; ++id) {
        sumC += c[id];
    }
    *rho = *P * W / (sumC * (*T) * 8.31451e+07); /*rho = PW/(R*T) */

    return;
}

/*get molecular weight for all species */
/* molecularWeight / atomicWeight are defined elsewhere in this file --
   presumably they fill the output with the tabulated weights; confirm. */
void CKWT(int * iwrk, double * restrict rwrk, double * restrict wt)
{
    molecularWeight(wt);
}

/*get atomic weight for all elements */
void CKAWT(int * iwrk, double * restrict rwrk, double * restrict awt)
{
    atomicWeight(awt);
}

/*given y[species]: mass fractions */
/*returns mean molecular weight (gm/mole) */
void CKMMWY(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wtm)
{
    double YOW = 0;
    double tmp[19];

    for (int i = 0; i < 19; i++)
    {
        tmp[i] = y[i]*imw[i];
    }
    for (int i = 0; i < 19; i++)
    {
        YOW += tmp[i];
    }

    *wtm = 1.0 / YOW;
    return;
}

/*given x[species]: mole fractions */
/*returns mean molecular weight (gm/mole) */
void CKMMWX(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wtm)
{
    double XW = 0;/* see Eq 4 in CK Manual */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    *wtm = XW;

    return;
}

/*given c[species]: molar concentration */
/*returns mean molecular weight (gm/mole) */
void CKMMWC(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict wtm)
{
    int id; /*loop counter */
    /*See Eq 5 in CK Manual */
    double W = 0;
    double sumC = 0;
    W += c[0]*2.015940; /*H2 */
    W += c[1]*1.007970; /*H */
    W += c[2]*31.998800; /*O2 */
    W += c[3]*17.007370; /*OH */
    W += c[4]*18.015340; /*H2O */
    W += c[5]*33.006770; /*HO2 */
    W += c[6]*34.014740; /*H2O2 */
    W += c[7]*15.035060; /*CH3 */
    W += c[8]*16.043030; /*CH4 */
    W += c[9]*28.010550; /*CO */
    W += c[10]*44.009950; /*CO2 */
    W += c[11]*30.026490; /*CH2O */
    W += c[12]*26.038240; /*C2H2 */
    W += c[13]*28.054180; /*C2H4 */
    W += c[14]*30.070120; /*C2H6 */
    W += c[15]*17.030610; /*NH3 */
    W += c[16]*30.006100; /*NO */
    W += c[17]*27.025820; /*HCN */
    W += c[18]*28.013400; /*N2 */

    for (id = 0; id < 19; ++id) {
        sumC += c[id];
    }
    /* CK provides no guard against divison by zero */
    *wtm = W/sumC;

    return;
}

/*convert y[species] (mass fracs) to x[species] (mole fracs) */
void CKYTX(double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x)
{
    double YOW = 0;
    double tmp[19];

    for (int i = 0; i < 19; i++)
    {
        tmp[i] = y[i]*imw[i];
    }
    for (int i = 0; i < 19; i++)
    {
        YOW += tmp[i];
    }

    double YOWINV = 1.0/YOW;

    for (int i = 0; i < 19; i++)
    {
        x[i] = y[i]*imw[i]*YOWINV;
    }
    return;
}

/*convert y[npoints*species] (mass fracs) to x[npoints*species] (mole fracs) */
/* Species-major layout as in VCKPY; two passes: accumulate 1/W per point,
   then normalize in place. */
void VCKYTX(int * restrict np, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict x)
{
    double YOW[*np];
    for (int i=0; i<(*np); i++) {
        YOW[i] = 0.0;
    }

    for (int n=0; n<19; n++) {
        for (int i=0; i<(*np); i++) {
            x[n*(*np)+i] = y[n*(*np)+i] * imw[n];
            YOW[i] += x[n*(*np)+i];
        }
    }

    for (int i=0; i<(*np); i++) {
        YOW[i] = 1.0/YOW[i];
    }

    for (int n=0; n<19; n++) {
        for (int i=0; i<(*np); i++) {
            x[n*(*np)+i] *=
YOW[i];
        }
    }
}

/*convert y[species] (mass fracs) to c[species] (molar conc) */
void CKYTCP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c)
{
    double YOW = 0;
    double PWORT;

    /*Compute inverse of mean molecular wt first */
    for (int i = 0; i < 19; i++)
    {
        c[i] = y[i]*imw[i];
    }
    for (int i = 0; i < 19; i++)
    {
        YOW += c[i];
    }

    /*PW/RT (see Eq. 7) */
    PWORT = (*P)/(YOW * 8.31451e+07 * (*T));
    /*Now compute conversion */

    for (int i = 0; i < 19; i++)
    {
        c[i] = PWORT * y[i] * imw[i];
    }
    return;
}

/*convert y[species] (mass fracs) to c[species] (molar conc) */
void CKYTCR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict c)
{
    for (int i = 0; i < 19; i++)
    {
        c[i] = (*rho)  * y[i] * imw[i];
    }
}

/*convert x[species] (mole fracs) to y[species] (mass fracs) */
void CKXTY(double * restrict x, int * iwrk, double * restrict rwrk, double * restrict y)
{
    double XW = 0; /*See Eq 4, 9 in CK Manual */
    /*Compute mean molecular wt first */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    /*Now compute conversion */
    double XWinv = 1.0/XW;
    y[0] = x[0]*2.015940*XWinv;
    y[1] = x[1]*1.007970*XWinv;
    y[2] = x[2]*31.998800*XWinv;
    y[3] = x[3]*17.007370*XWinv;
    y[4] = x[4]*18.015340*XWinv;
    y[5] = x[5]*33.006770*XWinv;
    y[6] = x[6]*34.014740*XWinv;
    y[7] = x[7]*15.035060*XWinv;
    y[8] = x[8]*16.043030*XWinv;
    y[9] = x[9]*28.010550*XWinv;
    y[10] =
 x[10]*44.009950*XWinv;
    y[11] = x[11]*30.026490*XWinv;
    y[12] = x[12]*26.038240*XWinv;
    y[13] = x[13]*28.054180*XWinv;
    y[14] = x[14]*30.070120*XWinv;
    y[15] = x[15]*17.030610*XWinv;
    y[16] = x[16]*30.006100*XWinv;
    y[17] = x[17]*27.025820*XWinv;
    y[18] = x[18]*28.013400*XWinv;

    return;
}

/*convert x[species] (mole fracs) to c[species] (molar conc) */
void CKXTCP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c)
{
    int id; /*loop counter */
    double PORT = (*P)/(8.31451e+07 * (*T)); /*P/RT */

    /*Compute conversion, see Eq 10 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*PORT;
    }

    return;
}

/*convert x[species] (mole fracs) to c[species] (molar conc) */
void CKXTCR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict c)
{
    int id; /*loop counter */
    double XW = 0; /*See Eq 4, 11 in CK Manual */
    double ROW;
    /*Compute mean molecular wt first */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    ROW = (*rho) / XW;

    /*Compute conversion, see Eq 11 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*ROW;
    }

    return;
}

/*convert c[species] (molar conc) to x[species] (mole fracs) */
void CKCTX(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict x)
{
    int id; /*loop counter */
    double sumC = 0;

    /*compute sum of c */
    for (id = 0; id < 19; ++id) {
        sumC += c[id];
    }

    /* See Eq 13 */
    double sumCinv = 1.0/sumC;
    for (id
 = 0; id < 19; ++id) {
        x[id] = c[id]*sumCinv;
    }

    return;
}

/*convert c[species] (molar conc) to y[species] (mass fracs) */
void CKCTY(double * restrict c, int * iwrk, double * restrict rwrk, double * restrict y)
{
    double CW = 0; /*See Eq 12 in CK Manual */
    /*compute denominator in eq 12 first */
    CW += c[0]*2.015940; /*H2 */
    CW += c[1]*1.007970; /*H */
    CW += c[2]*31.998800; /*O2 */
    CW += c[3]*17.007370; /*OH */
    CW += c[4]*18.015340; /*H2O */
    CW += c[5]*33.006770; /*HO2 */
    CW += c[6]*34.014740; /*H2O2 */
    CW += c[7]*15.035060; /*CH3 */
    CW += c[8]*16.043030; /*CH4 */
    CW += c[9]*28.010550; /*CO */
    CW += c[10]*44.009950; /*CO2 */
    CW += c[11]*30.026490; /*CH2O */
    CW += c[12]*26.038240; /*C2H2 */
    CW += c[13]*28.054180; /*C2H4 */
    CW += c[14]*30.070120; /*C2H6 */
    CW += c[15]*17.030610; /*NH3 */
    CW += c[16]*30.006100; /*NO */
    CW += c[17]*27.025820; /*HCN */
    CW += c[18]*28.013400; /*N2 */
    /*Now compute conversion */
    double CWinv = 1.0/CW;
    y[0] = c[0]*2.015940*CWinv;
    y[1] = c[1]*1.007970*CWinv;
    y[2] = c[2]*31.998800*CWinv;
    y[3] = c[3]*17.007370*CWinv;
    y[4] = c[4]*18.015340*CWinv;
    y[5] = c[5]*33.006770*CWinv;
    y[6] = c[6]*34.014740*CWinv;
    y[7] = c[7]*15.035060*CWinv;
    y[8] = c[8]*16.043030*CWinv;
    y[9] = c[9]*28.010550*CWinv;
    y[10] = c[10]*44.009950*CWinv;
    y[11] = c[11]*30.026490*CWinv;
    y[12] = c[12]*26.038240*CWinv;
    y[13] = c[13]*28.054180*CWinv;
    y[14] = c[14]*30.070120*CWinv;
    y[15] = c[15]*17.030610*CWinv;
    y[16] = c[16]*30.006100*CWinv;
    y[17] = c[17]*27.025820*CWinv;
    y[18] = c[18]*28.013400*CWinv;

    return;
}

/*get Cp/R as a function of T */
/*for all species (Eq 19) */
/* cp_R (defined elsewhere in this file) evaluates the polynomial fits from
   the tc cache: { 0, T, T^2, T^3, T^4 } -- entropy/gibbs variants below pass
   log(T) in slot 0 instead. */
void CKCPOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpor)
{
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    cp_R(cpor, tc);
}

/*get H/RT as a function of T */
/*for all species (Eq 20) */
void CKHORT(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hort)
{
    double tT = *T; /*temporary
temperature */ double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ speciesEnthalpy(hort, tc); } /*get S/R as a function of T */ /*for all species (Eq 21) */ void CKSOR(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sor) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ speciesEntropy(sor, tc); } /*get specific heat at constant volume as a function */ /*of T for all species (molar units) */ void CKCVML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvml) { int id; /*loop counter */ double tT = *T; /*temporary temperature */ double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ cv_R(cvml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { cvml[id] *= 8.31451e+07; } } /*get specific heat at constant pressure as a */ /*function of T for all species (molar units) */ void CKCPML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpml) { int id; /*loop counter */ double tT = *T; /*temporary temperature */ double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ cp_R(cpml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { cpml[id] *= 8.31451e+07; } } /*get internal energy as a function */ /*of T for all species (molar units) */ void CKUML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict uml) { int id; /*loop counter */ double tT = *T; /*temporary temperature */ double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double RT = 8.31451e+07*tT; /*R*T */ speciesInternalEnergy(uml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { uml[id] *= RT; } } /*get enthalpy as a function */ /*of T for all species (molar units) */ void CKHML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hml) { int id; /*loop counter */ double tT = *T; 
/*temporary temperature */ double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double RT = 8.31451e+07*tT; /*R*T */ speciesEnthalpy(hml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { hml[id] *= RT; } } /*get standard-state Gibbs energy as a function */ /*of T for all species (molar units) */ void CKGML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gml) { int id; /*loop counter */ double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double RT = 8.31451e+07*tT; /*R*T */ gibbs(gml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { gml[id] *= RT; } } /*get standard-state Helmholtz free energy as a */ /*function of T for all species (molar units) */ void CKAML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict aml) { int id; /*loop counter */ double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double RT = 8.31451e+07*tT; /*R*T */ helmholtz(aml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { aml[id] *= RT; } } /*Returns the standard-state entropies in molar units */ void CKSML(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sml) { int id; /*loop counter */ double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ speciesEntropy(sml, tc); /*convert to chemkin units */ for (id = 0; id < 19; ++id) { sml[id] *= 8.31451e+07; } } /*Returns the specific heats at constant volume */ /*in mass units (Eq. 
29) */
void CKCVMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cvms)
{
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    cv_R(cvms, tc);
    /*multiply by R/molecularweight */
    /* each constant below is R divided by that species' molecular weight (CGS) */
    cvms[0] *= 4.124383662212169e+07; /*H2 */
    cvms[1] *= 8.248767324424338e+07; /*H */
    cvms[2] *= 2.598381814318037e+06; /*O2 */
    cvms[3] *= 4.888768810227566e+06; /*OH */
    cvms[4] *= 4.615239012974499e+06; /*H2O */
    cvms[5] *= 2.519031701678171e+06; /*HO2 */
    cvms[6] *= 2.444384405113783e+06; /*H2O2 */
    cvms[7] *= 5.530081023953346e+06; /*CH3 */
    cvms[8] *= 5.182630712527496e+06; /*CH4 */
    cvms[9] *= 2.968349425484326e+06; /*CO */
    cvms[10] *= 1.889234139098090e+06; /*CO2 */
    cvms[11] *= 2.769058254894261e+06; /*CH2O */
    cvms[12] *= 3.193192012977835e+06; /*C2H2 */
    cvms[13] *= 2.963733033722604e+06; /*C2H4 */
    cvms[14] *= 2.765040511976673e+06; /*C2H6 */
    cvms[15] *= 4.882097587813943e+06; /*NH3 */
    cvms[16] *= 2.770939908885194e+06; /*NO */
    cvms[17] *= 3.076506096762281e+06; /*HCN */
    cvms[18] *= 2.968047434442088e+06; /*N2 */
}


/*Returns the specific heats at constant pressure */
/*in mass units (Eq. 26) */
void CKCPMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict cpms)
{
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    cp_R(cpms, tc);
    /*multiply by R/molecularweight */
    cpms[0] *= 4.124383662212169e+07; /*H2 */
    cpms[1] *= 8.248767324424338e+07; /*H */
    cpms[2] *= 2.598381814318037e+06; /*O2 */
    cpms[3] *= 4.888768810227566e+06; /*OH */
    cpms[4] *= 4.615239012974499e+06; /*H2O */
    cpms[5] *= 2.519031701678171e+06; /*HO2 */
    cpms[6] *= 2.444384405113783e+06; /*H2O2 */
    cpms[7] *= 5.530081023953346e+06; /*CH3 */
    cpms[8] *= 5.182630712527496e+06; /*CH4 */
    cpms[9] *= 2.968349425484326e+06; /*CO */
    cpms[10] *= 1.889234139098090e+06; /*CO2 */
    cpms[11] *= 2.769058254894261e+06; /*CH2O */
    cpms[12] *= 3.193192012977835e+06; /*C2H2 */
    cpms[13] *= 2.963733033722604e+06; /*C2H4 */
    cpms[14] *= 2.765040511976673e+06; /*C2H6 */
    cpms[15] *= 4.882097587813943e+06; /*NH3 */
    cpms[16] *= 2.770939908885194e+06; /*NO */
    cpms[17] *= 3.076506096762281e+06; /*HCN */
    cpms[18] *= 2.968047434442088e+06; /*N2 */
}


/*Returns internal energy in mass units (Eq 30.) */
void CKUMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ums)
{
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double RT = 8.31451e+07*tT; /*R*T */
    speciesInternalEnergy(ums, tc);
    /* imw[] holds inverse molecular weights (see usages below); RT/W converts to mass units */
    for (int i = 0; i < 19; i++)
    {
        ums[i] *= RT*imw[i];
    }
}


/*Returns enthalpy in mass units (Eq 27.) */
void CKHMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hms)
{
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double RT = 8.31451e+07*tT; /*R*T */
    speciesEnthalpy(hms, tc);
    for (int i = 0; i < 19; i++)
    {
        hms[i] *= RT*imw[i];
    }
}


/*Returns enthalpy in mass units (Eq 27.)
*/ void VCKHMS(int * restrict np, double * restrict T, int * iwrk, double * restrict rwrk, double * restrict hms) { double tc[5], h[19]; for (int i=0; i<(*np); i++) { tc[0] = 0.0; tc[1] = T[i]; tc[2] = T[i]*T[i]; tc[3] = T[i]*T[i]*T[i]; tc[4] = T[i]*T[i]*T[i]*T[i]; speciesEnthalpy(h, tc); hms[0*(*np)+i] = h[0]; hms[1*(*np)+i] = h[1]; hms[2*(*np)+i] = h[2]; hms[3*(*np)+i] = h[3]; hms[4*(*np)+i] = h[4]; hms[5*(*np)+i] = h[5]; hms[6*(*np)+i] = h[6]; hms[7*(*np)+i] = h[7]; hms[8*(*np)+i] = h[8]; hms[9*(*np)+i] = h[9]; hms[10*(*np)+i] = h[10]; hms[11*(*np)+i] = h[11]; hms[12*(*np)+i] = h[12]; hms[13*(*np)+i] = h[13]; hms[14*(*np)+i] = h[14]; hms[15*(*np)+i] = h[15]; hms[16*(*np)+i] = h[16]; hms[17*(*np)+i] = h[17]; hms[18*(*np)+i] = h[18]; } for (int n=0; n<19; n++) { for (int i=0; i<(*np); i++) { hms[n*(*np)+i] *= 8.31451e+07 * T[i] * imw[n]; } } } /*Returns gibbs in mass units (Eq 31.) */ void CKGMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict gms) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double RT = 8.31451e+07*tT; /*R*T */ gibbs(gms, tc); for (int i = 0; i < 19; i++) { gms[i] *= RT*imw[i]; } } /*Returns helmholtz in mass units (Eq 32.) */ void CKAMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict ams) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double RT = 8.31451e+07*tT; /*R*T */ helmholtz(ams, tc); for (int i = 0; i < 19; i++) { ams[i] *= RT*imw[i]; } } /*Returns the entropies in mass units (Eq 28.) 
*/
void CKSMS(double * restrict T, int * iwrk, double * restrict rwrk, double * restrict sms)
{
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    speciesEntropy(sms, tc);
    /*multiply by R/molecularweight */
    sms[0] *= 4.124383662212169e+07; /*H2 */
    sms[1] *= 8.248767324424338e+07; /*H */
    sms[2] *= 2.598381814318037e+06; /*O2 */
    sms[3] *= 4.888768810227566e+06; /*OH */
    sms[4] *= 4.615239012974499e+06; /*H2O */
    sms[5] *= 2.519031701678171e+06; /*HO2 */
    sms[6] *= 2.444384405113783e+06; /*H2O2 */
    sms[7] *= 5.530081023953346e+06; /*CH3 */
    sms[8] *= 5.182630712527496e+06; /*CH4 */
    sms[9] *= 2.968349425484326e+06; /*CO */
    sms[10] *= 1.889234139098090e+06; /*CO2 */
    sms[11] *= 2.769058254894261e+06; /*CH2O */
    sms[12] *= 3.193192012977835e+06; /*C2H2 */
    sms[13] *= 2.963733033722604e+06; /*C2H4 */
    sms[14] *= 2.765040511976673e+06; /*C2H6 */
    sms[15] *= 4.882097587813943e+06; /*NH3 */
    sms[16] *= 2.770939908885194e+06; /*NO */
    sms[17] *= 3.076506096762281e+06; /*HCN */
    sms[18] *= 2.968047434442088e+06; /*N2 */
}


/*Returns the mean specific heat at CP (Eq. 33) */
void CKCPBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cpbl)
{
    int id; /*loop counter */
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double cpor[19]; /* temporary storage */
    cp_R(cpor, tc);

    /*perform dot product */
    for (id = 0; id < 19; ++id) {
        result += x[id]*cpor[id];
    }

    *cpbl = result * 8.31451e+07;
}


/*Returns the mean specific heat at CP (Eq. 34) */
void CKCPBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cpbs)
{
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double cpor[19], tresult[19]; /* temporary storage */
    cp_R(cpor, tc);
    for (int i = 0; i < 19; i++)
    {
        tresult[i] = cpor[i]*y[i]*imw[i];
    }
    for (int i = 0; i < 19; i++)
    {
        result += tresult[i];
    }

    *cpbs = result * 8.31451e+07;
}


/*Returns the mean specific heat at CV (Eq. 35) */
void CKCVBL(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict cvbl)
{
    int id; /*loop counter */
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double cvor[19]; /* temporary storage */
    cv_R(cvor, tc);

    /*perform dot product */
    for (id = 0; id < 19; ++id) {
        result += x[id]*cvor[id];
    }

    *cvbl = result * 8.31451e+07;
}


/*Returns the mean specific heat at CV (Eq. 36) */
void CKCVBS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict cvbs)
{
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double cvor[19]; /* temporary storage */
    cv_R(cvor, tc);
    /*multiply by y/molecularweight */
    result += cvor[0]*y[0]*imw[0]; /*H2 */
    result += cvor[1]*y[1]*imw[1]; /*H */
    result += cvor[2]*y[2]*imw[2]; /*O2 */
    result += cvor[3]*y[3]*imw[3]; /*OH */
    result += cvor[4]*y[4]*imw[4]; /*H2O */
    result += cvor[5]*y[5]*imw[5]; /*HO2 */
    result += cvor[6]*y[6]*imw[6]; /*H2O2 */
    result += cvor[7]*y[7]*imw[7]; /*CH3 */
    result += cvor[8]*y[8]*imw[8]; /*CH4 */
    result += cvor[9]*y[9]*imw[9]; /*CO */
    result += cvor[10]*y[10]*imw[10]; /*CO2 */
    result += cvor[11]*y[11]*imw[11]; /*CH2O */
    result += cvor[12]*y[12]*imw[12]; /*C2H2 */
    result += cvor[13]*y[13]*imw[13]; /*C2H4 */
    result += cvor[14]*y[14]*imw[14]; /*C2H6 */
    result += cvor[15]*y[15]*imw[15]; /*NH3 */
    result += cvor[16]*y[16]*imw[16]; /*NO */
    result += cvor[17]*y[17]*imw[17]; /*HCN */
    result += cvor[18]*y[18]*imw[18]; /*N2 */

    *cvbs = result * 8.31451e+07;
}


/*Returns the mean enthalpy of the mixture in molar units */
void CKHBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict hbml)
{
    int id; /*loop counter */
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double hml[19]; /* temporary storage */
    double RT = 8.31451e+07*tT; /*R*T */
    speciesEnthalpy(hml, tc);

    /*perform dot product */
    for (id = 0; id < 19; ++id) {
        result += x[id]*hml[id];
    }

    *hbml = result * RT;
}


/*Returns mean enthalpy of mixture in mass units */
void CKHBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict hbms)
{
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double hml[19], tmp[19]; /* temporary storage */
    double RT = 8.31451e+07*tT; /*R*T */
    speciesEnthalpy(hml, tc);
    int id;
    for (id = 0; id < 19; ++id) {
        tmp[id] = y[id]*hml[id]*imw[id];
    }
    for (id = 0; id < 19; ++id) {
        result += tmp[id];
    }

    *hbms = result * RT;
}


/*get mean internal energy in molar units */
void CKUBML(double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict ubml)
{
    int id; /*loop counter */
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double uml[19]; /* temporary energy array */
    double RT = 8.31451e+07*tT; /*R*T */
    speciesInternalEnergy(uml, tc);

    /*perform dot product */
    for (id = 0; id < 19; ++id) {
        result += x[id]*uml[id];
    }

    *ubml = result * RT;
}


/*get mean internal energy in mass units */
void CKUBMS(double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict ubms)
{
    double result = 0;
    double tT = *T; /*temporary temperature */
    double tc[] = { 0, tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double ums[19]; /* temporary energy array */
    double RT = 8.31451e+07*tT; /*R*T */
    speciesInternalEnergy(ums, tc);
    /*perform dot product + scaling by wt */
    result += y[0]*ums[0]*imw[0]; /*H2 */
    result += y[1]*ums[1]*imw[1]; /*H */
    result += y[2]*ums[2]*imw[2]; /*O2 */
    result += y[3]*ums[3]*imw[3]; /*OH */
    result += y[4]*ums[4]*imw[4]; /*H2O */
    result += y[5]*ums[5]*imw[5]; /*HO2 */
    result += y[6]*ums[6]*imw[6]; /*H2O2 */
    result += y[7]*ums[7]*imw[7]; /*CH3 */
    result += y[8]*ums[8]*imw[8]; /*CH4 */
    result += y[9]*ums[9]*imw[9]; /*CO */
    result += y[10]*ums[10]*imw[10]; /*CO2 */
    result += y[11]*ums[11]*imw[11]; /*CH2O */
    result += y[12]*ums[12]*imw[12]; /*C2H2 */
    result += y[13]*ums[13]*imw[13]; /*C2H4 */
    result += y[14]*ums[14]*imw[14]; /*C2H6 */
    result += y[15]*ums[15]*imw[15]; /*NH3 */
    result += y[16]*ums[16]*imw[16]; /*NO */
    result += y[17]*ums[17]*imw[17]; /*HCN */
    result += y[18]*ums[18]*imw[18]; /*N2 */

    *ubms = result * RT;
}


/*get mixture entropy in molar units */
void CKSBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict sbml)
{
    int id; /*loop counter */
    double result = 0;
    /*Log of normalized pressure in cgs units dynes/cm^2 by Patm */
    double logPratio = log ( *P / 1013250.0 );
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double sor[19]; /* temporary storage */
    speciesEntropy(sor, tc);

    /*Compute Eq 42 */
    /* the +1e-100 guards log() against exactly-zero mole fractions */
    for (id = 0; id < 19; ++id) {
        result += x[id]*(sor[id]-log((x[id]+1e-100))-logPratio);
    }

    *sbml = result * 8.31451e+07;
}


/*get mixture entropy in mass units */
void CKSBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict sbms)
{
    double result = 0;
    /*Log of normalized pressure in cgs units dynes/cm^2 by Patm */
    double logPratio = log ( *P / 1013250.0 );
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double sor[19]; /* temporary storage */
    double x[19]; /* need a ytx conversion */
    double YOW = 0; /*See Eq 4, 6 in CK Manual */
    /*Compute inverse of mean molecular wt first */
    YOW += y[0]*imw[0]; /*H2 */
    YOW += y[1]*imw[1]; /*H */
    YOW += y[2]*imw[2]; /*O2 */
    YOW += y[3]*imw[3]; /*OH */
    YOW += y[4]*imw[4]; /*H2O */
    YOW += y[5]*imw[5]; /*HO2 */
    YOW += y[6]*imw[6]; /*H2O2 */
    YOW += y[7]*imw[7]; /*CH3 */
    YOW += y[8]*imw[8]; /*CH4 */
    YOW += y[9]*imw[9]; /*CO */
    YOW += y[10]*imw[10]; /*CO2 */
    YOW += y[11]*imw[11]; /*CH2O */
    YOW += y[12]*imw[12]; /*C2H2 */
    YOW += y[13]*imw[13]; /*C2H4 */
    YOW += y[14]*imw[14]; /*C2H6 */
    YOW += y[15]*imw[15]; /*NH3 */
    YOW += y[16]*imw[16]; /*NO */
    YOW += y[17]*imw[17]; /*HCN */
    YOW += y[18]*imw[18]; /*N2 */
    /*Now compute y to x conversion */
    /* divisors are the species molecular weights */
    x[0] = y[0]/(2.015940*YOW);
    x[1] = y[1]/(1.007970*YOW);
    x[2] = y[2]/(31.998800*YOW);
    x[3] = y[3]/(17.007370*YOW);
    x[4] = y[4]/(18.015340*YOW);
    x[5] = y[5]/(33.006770*YOW);
    x[6] = y[6]/(34.014740*YOW);
    x[7] = y[7]/(15.035060*YOW);
    x[8] = y[8]/(16.043030*YOW);
    x[9] = y[9]/(28.010550*YOW);
    x[10] = y[10]/(44.009950*YOW);
    x[11] = y[11]/(30.026490*YOW);
    x[12] = y[12]/(26.038240*YOW);
    x[13] = y[13]/(28.054180*YOW);
    x[14] = y[14]/(30.070120*YOW);
    x[15] = y[15]/(17.030610*YOW);
    x[16] = y[16]/(30.006100*YOW);
    x[17] = y[17]/(27.025820*YOW);
    x[18] = y[18]/(28.013400*YOW);
    speciesEntropy(sor, tc);
    /*Perform computation in Eq 42 and 43 */
    result += x[0]*(sor[0]-log((x[0]+1e-100))-logPratio);
    result += x[1]*(sor[1]-log((x[1]+1e-100))-logPratio);
    result += x[2]*(sor[2]-log((x[2]+1e-100))-logPratio);
    result += x[3]*(sor[3]-log((x[3]+1e-100))-logPratio);
    result += x[4]*(sor[4]-log((x[4]+1e-100))-logPratio);
    result += x[5]*(sor[5]-log((x[5]+1e-100))-logPratio);
    result += x[6]*(sor[6]-log((x[6]+1e-100))-logPratio);
    result += x[7]*(sor[7]-log((x[7]+1e-100))-logPratio);
    result += x[8]*(sor[8]-log((x[8]+1e-100))-logPratio);
    result += x[9]*(sor[9]-log((x[9]+1e-100))-logPratio);
    result += x[10]*(sor[10]-log((x[10]+1e-100))-logPratio);
    result += x[11]*(sor[11]-log((x[11]+1e-100))-logPratio);
    result += x[12]*(sor[12]-log((x[12]+1e-100))-logPratio);
    result += x[13]*(sor[13]-log((x[13]+1e-100))-logPratio);
    result += x[14]*(sor[14]-log((x[14]+1e-100))-logPratio);
    result += x[15]*(sor[15]-log((x[15]+1e-100))-logPratio);
    result += x[16]*(sor[16]-log((x[16]+1e-100))-logPratio);
    result += x[17]*(sor[17]-log((x[17]+1e-100))-logPratio);
    result += x[18]*(sor[18]-log((x[18]+1e-100))-logPratio);
    /*Scale by R/W */
    *sbms = result * 8.31451e+07 * YOW;
}


/*Returns mean gibbs free energy in molar units */
void CKGBML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict gbml)
{
    int id; /*loop counter */
    double result = 0;
    /*Log of normalized pressure in cgs units dynes/cm^2 by Patm */
    double logPratio = log ( *P / 1013250.0 );
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double RT = 8.31451e+07*tT; /*R*T */
    double gort[19]; /* temporary storage */
    /*Compute g/RT */
    gibbs(gort, tc);

    /*Compute Eq 44 */
    for (id = 0; id < 19; ++id) {
        result += x[id]*(gort[id]+log((x[id]+1e-100))+logPratio);
    }

    *gbml = result * RT;
}


/*Returns mixture gibbs free energy in mass units */
void CKGBMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict gbms)
{
    double result = 0;
    /*Log of normalized pressure in cgs units dynes/cm^2 by Patm */
    double logPratio = log ( *P / 1013250.0 );
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double RT = 8.31451e+07*tT; /*R*T */
    double gort[19]; /* temporary storage */
    double x[19]; /* need a ytx conversion */
    double YOW = 0; /*To hold 1/molecularweight */
    /*Compute inverse of mean molecular wt first */
    YOW += y[0]*imw[0]; /*H2 */
    YOW += y[1]*imw[1]; /*H */
    YOW += y[2]*imw[2]; /*O2 */
    YOW += y[3]*imw[3]; /*OH */
    YOW += y[4]*imw[4]; /*H2O */
    YOW += y[5]*imw[5]; /*HO2 */
    YOW += y[6]*imw[6]; /*H2O2 */
    YOW += y[7]*imw[7]; /*CH3 */
    YOW += y[8]*imw[8]; /*CH4 */
    YOW += y[9]*imw[9]; /*CO */
    YOW += y[10]*imw[10]; /*CO2 */
    YOW += y[11]*imw[11]; /*CH2O */
    YOW += y[12]*imw[12]; /*C2H2 */
    YOW += y[13]*imw[13]; /*C2H4 */
    YOW += y[14]*imw[14]; /*C2H6 */
    YOW += y[15]*imw[15]; /*NH3 */
    YOW += y[16]*imw[16]; /*NO */
    YOW += y[17]*imw[17]; /*HCN */
    YOW += y[18]*imw[18]; /*N2 */
    /*Now compute y to x conversion */
    x[0] = y[0]/(2.015940*YOW);
    x[1] = y[1]/(1.007970*YOW);
    x[2] = y[2]/(31.998800*YOW);
    x[3] = y[3]/(17.007370*YOW);
    x[4] = y[4]/(18.015340*YOW);
    x[5] = y[5]/(33.006770*YOW);
    x[6] = y[6]/(34.014740*YOW);
    x[7] = y[7]/(15.035060*YOW);
    x[8] = y[8]/(16.043030*YOW);
    x[9] = y[9]/(28.010550*YOW);
    x[10] = y[10]/(44.009950*YOW);
    x[11] = y[11]/(30.026490*YOW);
    x[12] = y[12]/(26.038240*YOW);
    x[13] = y[13]/(28.054180*YOW);
    x[14] = y[14]/(30.070120*YOW);
    x[15] = y[15]/(17.030610*YOW);
    x[16] = y[16]/(30.006100*YOW);
    x[17] = y[17]/(27.025820*YOW);
    x[18] = y[18]/(28.013400*YOW);
    gibbs(gort, tc);
    /*Perform computation in Eq 44 */
    result += x[0]*(gort[0]+log((x[0]+1e-100))+logPratio);
    result += x[1]*(gort[1]+log((x[1]+1e-100))+logPratio);
    result += x[2]*(gort[2]+log((x[2]+1e-100))+logPratio);
    result += x[3]*(gort[3]+log((x[3]+1e-100))+logPratio);
    result += x[4]*(gort[4]+log((x[4]+1e-100))+logPratio);
    result += x[5]*(gort[5]+log((x[5]+1e-100))+logPratio);
    result += x[6]*(gort[6]+log((x[6]+1e-100))+logPratio);
    result += x[7]*(gort[7]+log((x[7]+1e-100))+logPratio);
    result += x[8]*(gort[8]+log((x[8]+1e-100))+logPratio);
    result += x[9]*(gort[9]+log((x[9]+1e-100))+logPratio);
    result += x[10]*(gort[10]+log((x[10]+1e-100))+logPratio);
    result += x[11]*(gort[11]+log((x[11]+1e-100))+logPratio);
    result += x[12]*(gort[12]+log((x[12]+1e-100))+logPratio);
    result += x[13]*(gort[13]+log((x[13]+1e-100))+logPratio);
    result += x[14]*(gort[14]+log((x[14]+1e-100))+logPratio);
    result += x[15]*(gort[15]+log((x[15]+1e-100))+logPratio);
    result += x[16]*(gort[16]+log((x[16]+1e-100))+logPratio);
    result += x[17]*(gort[17]+log((x[17]+1e-100))+logPratio);
    result += x[18]*(gort[18]+log((x[18]+1e-100))+logPratio);
    /*Scale by RT/W */
    *gbms = result * RT * YOW;
}


/*Returns mean helmholtz free energy in molar units */
void CKABML(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict abml)
{
    int id; /*loop counter */
    double result = 0;
    /*Log of normalized pressure in cgs units dynes/cm^2 by Patm */
    double logPratio = log ( *P / 1013250.0 );
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double RT = 8.31451e+07*tT; /*R*T */
    double aort[19]; /* temporary storage */
/*Compute g/RT */
    helmholtz(aort, tc);

    /*Compute Eq 44 */
    for (id = 0; id < 19; ++id) {
        result += x[id]*(aort[id]+log((x[id]+1e-100))+logPratio);
    }

    *abml = result * RT;
}


/*Returns mixture helmholtz free energy in mass units */
void CKABMS(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict abms)
{
    double result = 0;
    /*Log of normalized pressure in cgs units dynes/cm^2 by Patm */
    double logPratio = log ( *P / 1013250.0 );
    double tT = *T; /*temporary temperature */
    double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */
    double RT = 8.31451e+07*tT; /*R*T */
    double aort[19]; /* temporary storage */
    double x[19]; /* need a ytx conversion */
    double YOW = 0; /*To hold 1/molecularweight */
    /*Compute inverse of mean molecular wt first */
    YOW += y[0]*imw[0]; /*H2 */
    YOW += y[1]*imw[1]; /*H */
    YOW += y[2]*imw[2]; /*O2 */
    YOW += y[3]*imw[3]; /*OH */
    YOW += y[4]*imw[4]; /*H2O */
    YOW += y[5]*imw[5]; /*HO2 */
    YOW += y[6]*imw[6]; /*H2O2 */
    YOW += y[7]*imw[7]; /*CH3 */
    YOW += y[8]*imw[8]; /*CH4 */
    YOW += y[9]*imw[9]; /*CO */
    YOW += y[10]*imw[10]; /*CO2 */
    YOW += y[11]*imw[11]; /*CH2O */
    YOW += y[12]*imw[12]; /*C2H2 */
    YOW += y[13]*imw[13]; /*C2H4 */
    YOW += y[14]*imw[14]; /*C2H6 */
    YOW += y[15]*imw[15]; /*NH3 */
    YOW += y[16]*imw[16]; /*NO */
    YOW += y[17]*imw[17]; /*HCN */
    YOW += y[18]*imw[18]; /*N2 */
    /*Now compute y to x conversion */
    x[0] = y[0]/(2.015940*YOW);
    x[1] = y[1]/(1.007970*YOW);
    x[2] = y[2]/(31.998800*YOW);
    x[3] = y[3]/(17.007370*YOW);
    x[4] = y[4]/(18.015340*YOW);
    x[5] = y[5]/(33.006770*YOW);
    x[6] = y[6]/(34.014740*YOW);
    x[7] = y[7]/(15.035060*YOW);
    x[8] = y[8]/(16.043030*YOW);
    x[9] = y[9]/(28.010550*YOW);
    x[10] = y[10]/(44.009950*YOW);
    x[11] = y[11]/(30.026490*YOW);
    x[12] = y[12]/(26.038240*YOW);
    x[13] = y[13]/(28.054180*YOW);
    x[14] = y[14]/(30.070120*YOW);
    x[15] = y[15]/(17.030610*YOW);
    x[16] = y[16]/(30.006100*YOW);
    x[17] = y[17]/(27.025820*YOW);
    x[18] = y[18]/(28.013400*YOW);
    helmholtz(aort, tc);
    /*Perform computation in Eq 44 */
    result += x[0]*(aort[0]+log((x[0]+1e-100))+logPratio);
    result += x[1]*(aort[1]+log((x[1]+1e-100))+logPratio);
    result += x[2]*(aort[2]+log((x[2]+1e-100))+logPratio);
    result += x[3]*(aort[3]+log((x[3]+1e-100))+logPratio);
    result += x[4]*(aort[4]+log((x[4]+1e-100))+logPratio);
    result += x[5]*(aort[5]+log((x[5]+1e-100))+logPratio);
    result += x[6]*(aort[6]+log((x[6]+1e-100))+logPratio);
    result += x[7]*(aort[7]+log((x[7]+1e-100))+logPratio);
    result += x[8]*(aort[8]+log((x[8]+1e-100))+logPratio);
    result += x[9]*(aort[9]+log((x[9]+1e-100))+logPratio);
    result += x[10]*(aort[10]+log((x[10]+1e-100))+logPratio);
    result += x[11]*(aort[11]+log((x[11]+1e-100))+logPratio);
    result += x[12]*(aort[12]+log((x[12]+1e-100))+logPratio);
    result += x[13]*(aort[13]+log((x[13]+1e-100))+logPratio);
    result += x[14]*(aort[14]+log((x[14]+1e-100))+logPratio);
    result += x[15]*(aort[15]+log((x[15]+1e-100))+logPratio);
    result += x[16]*(aort[16]+log((x[16]+1e-100))+logPratio);
    result += x[17]*(aort[17]+log((x[17]+1e-100))+logPratio);
    result += x[18]*(aort[18]+log((x[18]+1e-100))+logPratio);
    /*Scale by RT/W */
    *abms = result * RT * YOW;
}


/*compute the production rate for each species */
/* NOTE(review): this CKWC variant was generated commented-out; left disabled as-is */
/* void CKWC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict wdot) */
/* { */
/*     int id; /\*loop counter *\/ */

/*     /\*convert to SI *\/ */
/*     for (id = 0; id < 19; ++id) { */
/*         C[id] *= 1.0e6; */
/*     } */

/*     /\*convert to chemkin units *\/ */
/*     productionRate(wdot, C, *T); */

/*     /\*convert to chemkin units *\/ */
/*     for (id = 0; id < 19; ++id) { */
/*         C[id] *= 1.0e-6; */
/*         wdot[id] *= 1.0e-6; */
/*     } */
/* } */


/*Returns the molar production rate of species */
/*Given P, T, and mass fractions */
void CKWYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
double YOW = 0;
    double PWORT;
    /*Compute inverse of mean molecular wt first */
    YOW += y[0]*imw[0]; /*H2 */
    YOW += y[1]*imw[1]; /*H */
    YOW += y[2]*imw[2]; /*O2 */
    YOW += y[3]*imw[3]; /*OH */
    YOW += y[4]*imw[4]; /*H2O */
    YOW += y[5]*imw[5]; /*HO2 */
    YOW += y[6]*imw[6]; /*H2O2 */
    YOW += y[7]*imw[7]; /*CH3 */
    YOW += y[8]*imw[8]; /*CH4 */
    YOW += y[9]*imw[9]; /*CO */
    YOW += y[10]*imw[10]; /*CO2 */
    YOW += y[11]*imw[11]; /*CH2O */
    YOW += y[12]*imw[12]; /*C2H2 */
    YOW += y[13]*imw[13]; /*C2H4 */
    YOW += y[14]*imw[14]; /*C2H6 */
    YOW += y[15]*imw[15]; /*NH3 */
    YOW += y[16]*imw[16]; /*NO */
    YOW += y[17]*imw[17]; /*HCN */
    YOW += y[18]*imw[18]; /*N2 */
    /*PW/RT (see Eq. 7) */
    PWORT = (*P)/(YOW * 8.31451e+07 * (*T));
    /*multiply by 1e6 so c goes to SI */
    PWORT *= 1e6;
    /*Now compute conversion (and go to SI) */
    c[0] = PWORT * y[0]*imw[0];
    c[1] = PWORT * y[1]*imw[1];
    c[2] = PWORT * y[2]*imw[2];
    c[3] = PWORT * y[3]*imw[3];
    c[4] = PWORT * y[4]*imw[4];
    c[5] = PWORT * y[5]*imw[5];
    c[6] = PWORT * y[6]*imw[6];
    c[7] = PWORT * y[7]*imw[7];
    c[8] = PWORT * y[8]*imw[8];
    c[9] = PWORT * y[9]*imw[9];
    c[10] = PWORT * y[10]*imw[10];
    c[11] = PWORT * y[11]*imw[11];
    c[12] = PWORT * y[12]*imw[12];
    c[13] = PWORT * y[13]*imw[13];
    c[14] = PWORT * y[14]*imw[14];
    c[15] = PWORT * y[15]*imw[15];
    c[16] = PWORT * y[16]*imw[16];
    c[17] = PWORT * y[17]*imw[17];
    c[18] = PWORT * y[18]*imw[18];

    /*convert to chemkin units */
    productionRate(wdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 19; ++id) {
        wdot[id] *= 1.0e-6;
    }
}


/*Returns the molar production rate of species */
/*Given P, T, and mole fractions */
void CKWXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    double PORT = 1e6 * (*P)/(8.31451e+07 * (*T)); /*1e6 * P/RT so c goes to SI units */

    /*Compute conversion, see Eq 10 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*PORT;
    }

    /*convert to chemkin units */
    productionRate(wdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 19; ++id) {
        wdot[id] *= 1.0e-6;
    }
}


/*Returns the molar production rate of species */
/*Given rho, T, and mass fractions */
void CKWYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    /*See Eq 8 with an extra 1e6 so c goes to SI */
    c[0] = 1e6 * (*rho) * y[0]*imw[0];
    c[1] = 1e6 * (*rho) * y[1]*imw[1];
    c[2] = 1e6 * (*rho) * y[2]*imw[2];
    c[3] = 1e6 * (*rho) * y[3]*imw[3];
    c[4] = 1e6 * (*rho) * y[4]*imw[4];
    c[5] = 1e6 * (*rho) * y[5]*imw[5];
    c[6] = 1e6 * (*rho) * y[6]*imw[6];
    c[7] = 1e6 * (*rho) * y[7]*imw[7];
    c[8] = 1e6 * (*rho) * y[8]*imw[8];
    c[9] = 1e6 * (*rho) * y[9]*imw[9];
    c[10] = 1e6 * (*rho) * y[10]*imw[10];
    c[11] = 1e6 * (*rho) * y[11]*imw[11];
    c[12] = 1e6 * (*rho) * y[12]*imw[12];
    c[13] = 1e6 * (*rho) * y[13]*imw[13];
    c[14] = 1e6 * (*rho) * y[14]*imw[14];
    c[15] = 1e6 * (*rho) * y[15]*imw[15];
    c[16] = 1e6 * (*rho) * y[16]*imw[16];
    c[17] = 1e6 * (*rho) * y[17]*imw[17];
    c[18] = 1e6 * (*rho) * y[18]*imw[18];

    /*call productionRate */
    productionRate(wdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 19; ++id) {
        wdot[id] *= 1.0e-6;
    }
}


/*Returns the molar production rate of species */
/*Given rho, T, and mass fractions */
void VCKWYR(int * restrict np, double * restrict rho, double * restrict T, double * restrict y, int * restrict iwrk, double * restrict rwrk, double * restrict wdot)
{
    /* NOTE(review): VLA lives on the stack; a large *np could overflow it — confirm callers bound np */
    double c[19*(*np)]; /*temporary storage */
    /*See Eq 8 with an extra 1e6 so c goes to SI */
    for (int n=0; n<19; n++) {
        for (int i=0; i<(*np); i++) {
            c[n*(*np)+i] = 1.0e6 * rho[i] * y[n*(*np)+i] * imw[n];
        }
    }

    /*call productionRate */
    vproductionRate(*np, wdot, c, T);

    /*convert to chemkin units */
    for (int i=0; i<19*(*np); i++) {
        wdot[i] *= 1.0e-6;
    }
}


/*Returns the molar production rate of species */
/*Given rho, T, and mole fractions */
void CKWXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict wdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    double XW = 0; /*See Eq 4, 11 in CK Manual */
    double ROW;
    /*Compute mean molecular wt first */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    /*Extra 1e6 factor to take c to SI */
    ROW = 1e6*(*rho) / XW;

    /*Compute conversion, see Eq 11 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*ROW;
    }

    /*convert to chemkin units */
    productionRate(wdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 19; ++id) {
        wdot[id] *= 1.0e-6;
    }
}


/*Returns the rate of progress for each reaction */
void CKQC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    int id; /*loop counter */

    /*convert to SI */
    for (id = 0; id < 19; ++id) {
        C[id] *= 1.0e6;
    }

    /*convert to chemkin units */
    progressRate(qdot, C, *T);

    /*convert to chemkin units */
    for (id = 0; id < 19; ++id) {
        C[id] *= 1.0e-6;
    }

    /* NOTE(review): loop bound is 0 — this mechanism appears to have 0 reactions, so the body never runs */
    for (id = 0; id < 0; ++id) {
        qdot[id] *= 1.0e-6;
    }
}


/*Returns the progress rates of each reactions */
/*Given P, T, and mole fractions */
void CKKFKR(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict q_f, double * restrict q_r)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    double PORT = 1e6 * (*P)/(8.31451e+07 * (*T)); /*1e6 * P/RT so c goes to SI units */
/*Compute conversion, see Eq 10 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*PORT;
    }

    /*convert to chemkin units */
    progressRateFR(q_f, q_r, c, *T);

    /*convert to chemkin units */
    /* loop bound is 0 (0-reaction mechanism) — no rescaling actually happens */
    for (id = 0; id < 0; ++id) {
        q_f[id] *= 1.0e-6;
        q_r[id] *= 1.0e-6;
    }
}


/*Returns the progress rates of each reactions */
/*Given P, T, and mass fractions */
void CKQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    double YOW = 0;
    double PWORT;
    /*Compute inverse of mean molecular wt first */
    YOW += y[0]*imw[0]; /*H2 */
    YOW += y[1]*imw[1]; /*H */
    YOW += y[2]*imw[2]; /*O2 */
    YOW += y[3]*imw[3]; /*OH */
    YOW += y[4]*imw[4]; /*H2O */
    YOW += y[5]*imw[5]; /*HO2 */
    YOW += y[6]*imw[6]; /*H2O2 */
    YOW += y[7]*imw[7]; /*CH3 */
    YOW += y[8]*imw[8]; /*CH4 */
    YOW += y[9]*imw[9]; /*CO */
    YOW += y[10]*imw[10]; /*CO2 */
    YOW += y[11]*imw[11]; /*CH2O */
    YOW += y[12]*imw[12]; /*C2H2 */
    YOW += y[13]*imw[13]; /*C2H4 */
    YOW += y[14]*imw[14]; /*C2H6 */
    YOW += y[15]*imw[15]; /*NH3 */
    YOW += y[16]*imw[16]; /*NO */
    YOW += y[17]*imw[17]; /*HCN */
    YOW += y[18]*imw[18]; /*N2 */
    /*PW/RT (see Eq. 7) */
    PWORT = (*P)/(YOW * 8.31451e+07 * (*T));
    /*multiply by 1e6 so c goes to SI */
    PWORT *= 1e6;
    /*Now compute conversion (and go to SI) */
    c[0] = PWORT * y[0]*imw[0];
    c[1] = PWORT * y[1]*imw[1];
    c[2] = PWORT * y[2]*imw[2];
    c[3] = PWORT * y[3]*imw[3];
    c[4] = PWORT * y[4]*imw[4];
    c[5] = PWORT * y[5]*imw[5];
    c[6] = PWORT * y[6]*imw[6];
    c[7] = PWORT * y[7]*imw[7];
    c[8] = PWORT * y[8]*imw[8];
    c[9] = PWORT * y[9]*imw[9];
    c[10] = PWORT * y[10]*imw[10];
    c[11] = PWORT * y[11]*imw[11];
    c[12] = PWORT * y[12]*imw[12];
    c[13] = PWORT * y[13]*imw[13];
    c[14] = PWORT * y[14]*imw[14];
    c[15] = PWORT * y[15]*imw[15];
    c[16] = PWORT * y[16]*imw[16];
    c[17] = PWORT * y[17]*imw[17];
    c[18] = PWORT * y[18]*imw[18];

    /*convert to chemkin units */
    progressRate(qdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 0; ++id) {
        qdot[id] *= 1.0e-6;
    }
}


/*Returns the progress rates of each reactions */
/*Given P, T, and mole fractions */
void CKQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    double PORT = 1e6 * (*P)/(8.31451e+07 * (*T)); /*1e6 * P/RT so c goes to SI units */

    /*Compute conversion, see Eq 10 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*PORT;
    }

    /*convert to chemkin units */
    progressRate(qdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 0; ++id) {
        qdot[id] *= 1.0e-6;
    }
}


/*Returns the progress rates of each reactions */
/*Given rho, T, and mass fractions */
void CKQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    /*See Eq 8 with an extra 1e6 so c goes to SI */
    c[0] = 1e6 * (*rho) * y[0]*imw[0];
    c[1] = 1e6 * (*rho) * y[1]*imw[1];
    c[2] = 1e6 * (*rho) * y[2]*imw[2];
    c[3] = 1e6 * (*rho) * y[3]*imw[3];
    c[4] = 1e6 * (*rho) * y[4]*imw[4];
    c[5] = 1e6 * (*rho) * y[5]*imw[5];
    c[6] = 1e6 * (*rho) * y[6]*imw[6];
    c[7] = 1e6 * (*rho) * y[7]*imw[7];
    c[8] = 1e6 * (*rho) * y[8]*imw[8];
    c[9] = 1e6 * (*rho) * y[9]*imw[9];
    c[10] = 1e6 * (*rho) * y[10]*imw[10];
    c[11] = 1e6 * (*rho) * y[11]*imw[11];
    c[12] = 1e6 * (*rho) * y[12]*imw[12];
    c[13] = 1e6 * (*rho) * y[13]*imw[13];
    c[14] = 1e6 * (*rho) * y[14]*imw[14];
    c[15] = 1e6 * (*rho) * y[15]*imw[15];
    c[16] = 1e6 * (*rho) * y[16]*imw[16];
    c[17] = 1e6 * (*rho) * y[17]*imw[17];
    c[18] = 1e6 * (*rho) * y[18]*imw[18];

    /*call progressRate */
    progressRate(qdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 0; ++id) {
        qdot[id] *= 1.0e-6;
    }
}


/*Returns the progress rates of each reactions */
/*Given rho, T, and mole fractions */
void CKQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict qdot)
{
    int id; /*loop counter */
    double c[19]; /*temporary storage */
    double XW = 0; /*See Eq 4, 11 in CK Manual */
    double ROW;
    /*Compute mean molecular wt first */
    XW += x[0]*2.015940; /*H2 */
    XW += x[1]*1.007970; /*H */
    XW += x[2]*31.998800; /*O2 */
    XW += x[3]*17.007370; /*OH */
    XW += x[4]*18.015340; /*H2O */
    XW += x[5]*33.006770; /*HO2 */
    XW += x[6]*34.014740; /*H2O2 */
    XW += x[7]*15.035060; /*CH3 */
    XW += x[8]*16.043030; /*CH4 */
    XW += x[9]*28.010550; /*CO */
    XW += x[10]*44.009950; /*CO2 */
    XW += x[11]*30.026490; /*CH2O */
    XW += x[12]*26.038240; /*C2H2 */
    XW += x[13]*28.054180; /*C2H4 */
    XW += x[14]*30.070120; /*C2H6 */
    XW += x[15]*17.030610; /*NH3 */
    XW += x[16]*30.006100; /*NO */
    XW += x[17]*27.025820; /*HCN */
    XW += x[18]*28.013400; /*N2 */
    /*Extra 1e6 factor to take c to SI */
    ROW = 1e6*(*rho) / XW;

    /*Compute conversion, see Eq 11 */
    for (id = 0; id < 19; ++id) {
        c[id] = x[id]*ROW;
    }

    /*convert to chemkin units */
    progressRate(qdot, c, *T);

    /*convert to chemkin units */
    for (id = 0; id < 0; ++id) {
        qdot[id] *= 1.0e-6;
    }
}


/*Returns the stoichiometric coefficients */
/*of the reaction mechanism.
(Eq 50) */ void CKNU(int * kdim, int * iwrk, double * restrict rwrk, int * nuki) { int id; /*loop counter */ int kd = (*kdim); /*Zero nuki */ for (id = 0; id < 19 * kd; ++ id) { nuki[id] = 0; } } /*Returns the elemental composition */ /*of the speciesi (mdim is num of elements) */ void CKNCF(int * mdim, int * iwrk, double * restrict rwrk, int * ncf) { int id; /*loop counter */ int kd = (*mdim); /*Zero ncf */ for (id = 0; id < kd * 19; ++ id) { ncf[id] = 0; } /*H2 */ ncf[ 0 * kd + 1 ] = 2; /*H */ /*H */ ncf[ 1 * kd + 1 ] = 1; /*H */ /*O2 */ ncf[ 2 * kd + 0 ] = 2; /*O */ /*OH */ ncf[ 3 * kd + 0 ] = 1; /*O */ ncf[ 3 * kd + 1 ] = 1; /*H */ /*H2O */ ncf[ 4 * kd + 1 ] = 2; /*H */ ncf[ 4 * kd + 0 ] = 1; /*O */ /*HO2 */ ncf[ 5 * kd + 1 ] = 1; /*H */ ncf[ 5 * kd + 0 ] = 2; /*O */ /*H2O2 */ ncf[ 6 * kd + 1 ] = 2; /*H */ ncf[ 6 * kd + 0 ] = 2; /*O */ /*CH3 */ ncf[ 7 * kd + 2 ] = 1; /*C */ ncf[ 7 * kd + 1 ] = 3; /*H */ /*CH4 */ ncf[ 8 * kd + 2 ] = 1; /*C */ ncf[ 8 * kd + 1 ] = 4; /*H */ /*CO */ ncf[ 9 * kd + 2 ] = 1; /*C */ ncf[ 9 * kd + 0 ] = 1; /*O */ /*CO2 */ ncf[ 10 * kd + 2 ] = 1; /*C */ ncf[ 10 * kd + 0 ] = 2; /*O */ /*CH2O */ ncf[ 11 * kd + 1 ] = 2; /*H */ ncf[ 11 * kd + 2 ] = 1; /*C */ ncf[ 11 * kd + 0 ] = 1; /*O */ /*C2H2 */ ncf[ 12 * kd + 2 ] = 2; /*C */ ncf[ 12 * kd + 1 ] = 2; /*H */ /*C2H4 */ ncf[ 13 * kd + 2 ] = 2; /*C */ ncf[ 13 * kd + 1 ] = 4; /*H */ /*C2H6 */ ncf[ 14 * kd + 2 ] = 2; /*C */ ncf[ 14 * kd + 1 ] = 6; /*H */ /*NH3 */ ncf[ 15 * kd + 3 ] = 1; /*N */ ncf[ 15 * kd + 1 ] = 3; /*H */ /*NO */ ncf[ 16 * kd + 3 ] = 1; /*N */ ncf[ 16 * kd + 0 ] = 1; /*O */ /*HCN */ ncf[ 17 * kd + 1 ] = 1; /*H */ ncf[ 17 * kd + 2 ] = 1; /*C */ ncf[ 17 * kd + 3 ] = 1; /*N */ /*N2 */ ncf[ 18 * kd + 3 ] = 2; /*N */ } /*Returns the arrehenius coefficients */ /*for all reactions */ void CKABE(int * iwrk, double * restrict rwrk, double * restrict a, double * restrict b, double * restrict e) { for (int i=0; i<0; ++i) { a[i] = fwd_A[i]; b[i] = fwd_beta[i]; e[i] = fwd_Ea[i]; } return; 
} /*Returns the equil constants for each reaction */ void CKEQC(double * restrict T, double * restrict C, int * iwrk, double * restrict rwrk, double * restrict eqcon) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double gort[19]; /* temporary storage */ /*compute the Gibbs free energy */ gibbs(gort, tc); /*compute the equilibrium constants */ equilibriumConstants(eqcon, gort, tT); } /*Returns the equil constants for each reaction */ /*Given P, T, and mass fractions */ void CKEQYP(double * restrict P, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double gort[19]; /* temporary storage */ /*compute the Gibbs free energy */ gibbs(gort, tc); /*compute the equilibrium constants */ equilibriumConstants(eqcon, gort, tT); } /*Returns the equil constants for each reaction */ /*Given P, T, and mole fractions */ void CKEQXP(double * restrict P, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double gort[19]; /* temporary storage */ /*compute the Gibbs free energy */ gibbs(gort, tc); /*compute the equilibrium constants */ equilibriumConstants(eqcon, gort, tT); } /*Returns the equil constants for each reaction */ /*Given rho, T, and mass fractions */ void CKEQYR(double * restrict rho, double * restrict T, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict eqcon) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double gort[19]; /* temporary storage */ /*compute the Gibbs free energy */ gibbs(gort, tc); /*compute the equilibrium 
constants */ equilibriumConstants(eqcon, gort, tT); } /*Returns the equil constants for each reaction */ /*Given rho, T, and mole fractions */ void CKEQXR(double * restrict rho, double * restrict T, double * restrict x, int * iwrk, double * restrict rwrk, double * restrict eqcon) { double tT = *T; /*temporary temperature */ double tc[] = { log(tT), tT, tT*tT, tT*tT*tT, tT*tT*tT*tT }; /*temperature cache */ double gort[19]; /* temporary storage */ /*compute the Gibbs free energy */ gibbs(gort, tc); /*compute the equilibrium constants */ equilibriumConstants(eqcon, gort, tT); } static double T_save = -1; #ifdef _OPENMP #pragma omp threadprivate(T_save) #endif static double k_f_save[0]; #ifdef _OPENMP #pragma omp threadprivate(k_f_save) #endif static double Kc_save[0]; #ifdef _OPENMP #pragma omp threadprivate(Kc_save) #endif /*compute the production rate for each species */ void productionRate(double * restrict wdot, double * restrict sc, double T) { double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */ double invT = 1.0 / tc[1]; if (T != T_save) { T_save = T; comp_k_f(tc,invT,k_f_save); comp_Kc(tc,invT,Kc_save); } double qdot, q_f[0], q_r[0]; comp_qfqr(q_f, q_r, sc, tc, invT); for (int i = 0; i < 19; ++i) { wdot[i] = 0.0; } return; } void comp_k_f(double * restrict tc, double invT, double * restrict k_f) { #ifdef __INTEL_COMPILER #pragma simd #endif for (int i=0; i<0; ++i) { k_f[i] = prefactor_units[i] * fwd_A[i] * exp(fwd_beta[i] * tc[0] - activation_units[i] * fwd_Ea[i] * invT); }; return; } void comp_Kc(double * restrict tc, double invT, double * restrict Kc) { /*compute the Gibbs free energy */ double g_RT[19]; gibbs(g_RT, tc); #ifdef __INTEL_COMPILER #pragma simd #endif for (int i=0; i<0; ++i) { Kc[i] = exp(Kc[i]); }; /*reference concentration: P_atm / (RT) in inverse mol/m^3 */ double refC = 101325 / 8.31451 * invT; double refCinv = 1 / refC; return; } void comp_qfqr(double * restrict qf, double * restrict qr, double * restrict sc, double * 
restrict tc, double invT) { double T = tc[1]; /*compute the mixture concentration */ double mixture = 0.0; for (int i = 0; i < 19; ++i) { mixture += sc[i]; } double Corr[0]; for (int i = 0; i < 0; ++i) { Corr[i] = 1.0; } for (int i=0; i<0; i++) { qf[i] *= Corr[i] * k_f_save[i]; qr[i] *= Corr[i] * k_f_save[i] / Kc_save[i]; } return; } /*compute the production rate for each species */ void vproductionRate(int npt, double * restrict wdot, double * restrict sc, double * restrict T) { double k_f_s[0*npt], Kc_s[0*npt], mixture[npt], g_RT[19*npt]; double tc[5*npt], invT[npt]; #ifdef __INTEL_COMPILER #pragma simd #endif for (int i=0; i<npt; i++) { tc[0*npt+i] = log(T[i]); tc[1*npt+i] = T[i]; tc[2*npt+i] = T[i]*T[i]; tc[3*npt+i] = T[i]*T[i]*T[i]; tc[4*npt+i] = T[i]*T[i]*T[i]*T[i]; invT[i] = 1.0 / T[i]; } for (int i=0; i<npt; i++) { mixture[i] = 0.0; } for (int n=0; n<19; n++) { for (int i=0; i<npt; i++) { mixture[i] += sc[n*npt+i]; wdot[n*npt+i] = 0.0; } } vcomp_k_f(npt, k_f_s, tc, invT); vcomp_gibbs(npt, g_RT, tc); vcomp_Kc(npt, Kc_s, g_RT, invT); vcomp_wdot(npt, wdot, mixture, sc, k_f_s, Kc_s, tc, invT, T); } void vcomp_k_f(int npt, double * restrict k_f_s, double * restrict tc, double * restrict invT) { #ifdef __INTEL_COMPILER #pragma simd #endif for (int i=0; i<npt; i++) { } } void vcomp_gibbs(int npt, double * restrict g_RT, double * restrict tc) { /*compute the Gibbs free energy */ for (int i=0; i<npt; i++) { double tg[5], g[19]; tg[0] = tc[0*npt+i]; tg[1] = tc[1*npt+i]; tg[2] = tc[2*npt+i]; tg[3] = tc[3*npt+i]; tg[4] = tc[4*npt+i]; gibbs(g, tg); g_RT[0*npt+i] = g[0]; g_RT[1*npt+i] = g[1]; g_RT[2*npt+i] = g[2]; g_RT[3*npt+i] = g[3]; g_RT[4*npt+i] = g[4]; g_RT[5*npt+i] = g[5]; g_RT[6*npt+i] = g[6]; g_RT[7*npt+i] = g[7]; g_RT[8*npt+i] = g[8]; g_RT[9*npt+i] = g[9]; g_RT[10*npt+i] = g[10]; g_RT[11*npt+i] = g[11]; g_RT[12*npt+i] = g[12]; g_RT[13*npt+i] = g[13]; g_RT[14*npt+i] = g[14]; g_RT[15*npt+i] = g[15]; g_RT[16*npt+i] = g[16]; g_RT[17*npt+i] = g[17]; g_RT[18*npt+i] = 
g[18]; } } void vcomp_Kc(int npt, double * restrict Kc_s, double * restrict g_RT, double * restrict invT) { #ifdef __INTEL_COMPILER #pragma simd #endif for (int i=0; i<npt; i++) { /*reference concentration: P_atm / (RT) in inverse mol/m^3 */ double refC = (101325. / 8.31451) * invT[i]; double refCinv = 1.0 / refC; } } void vcomp_wdot(int npt, double * restrict wdot, double * restrict mixture, double * restrict sc, double * restrict k_f_s, double * restrict Kc_s, double * restrict tc, double * restrict invT, double * restrict T) { #ifdef __INTEL_COMPILER #pragma simd #endif for (int i=0; i<npt; i++) { double qdot, q_f, q_r, phi_f, phi_r, k_f, k_r, Kc; } } /*compute the reaction Jacobian */ void DWDOT(double * restrict J, double * restrict sc, double * restrict Tp, int * consP) { double c[19]; for (int k=0; k<19; k++) { c[k] = 1.e6 * sc[k]; } aJacobian(J, c, *Tp, *consP); /* dwdot[k]/dT */ for (int k=0; k<19; k++) { J[380+k] *= 1.e-6; } /* dTdot/d[X] */ for (int k=0; k<19; k++) { J[k*20+19] *= 1.e6; } return; } /*compute the reaction Jacobian */ void aJacobian(double * restrict J, double * restrict sc, double T, int consP) { for (int i=0; i<400; i++) { J[i] = 0.0; } double wdot[19]; for (int k=0; k<19; k++) { wdot[k] = 0.0; } double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */ double invT = 1.0 / tc[1]; double invT2 = invT * invT; /*reference concentration: P_atm / (RT) in inverse mol/m^3 */ double refC = 101325 / 8.31451 / T; double refCinv = 1.0 / refC; /*compute the mixture concentration */ double mixture = 0.0; for (int k = 0; k < 19; ++k) { mixture += sc[k]; } /*compute the Gibbs free energy */ double g_RT[19]; gibbs(g_RT, tc); /*compute the species enthalpy */ double h_RT[19]; speciesEnthalpy(h_RT, tc); double phi_f, k_f, k_r, phi_r, Kc, q, q_nocor, Corr, alpha; double dlnkfdT, dlnk0dT, dlnKcdT, dkrdT, dqdT; double dqdci, dcdc_fac, dqdc[19]; double Pr, fPr, F, k_0, logPr; double logFcent, troe_c, troe_n, troePr_den, troePr, troe; double 
Fcent1, Fcent2, Fcent3, Fcent; double dlogFdc, dlogFdn, dlogFdcn_fac; double dlogPrdT, dlogfPrdT, dlogFdT, dlogFcentdT, dlogFdlogPr, dlnCorrdT; const double ln10 = log(10.0); const double log10e = 1.0/log(10.0); double c_R[19], dcRdT[19], e_RT[19]; double * eh_RT; if (consP) { cp_R(c_R, tc); dcvpRdT(dcRdT, tc); eh_RT = &h_RT[0]; } else { cv_R(c_R, tc); dcvpRdT(dcRdT, tc); speciesInternalEnergy(e_RT, tc); eh_RT = &e_RT[0]; } double cmix = 0.0, ehmix = 0.0, dcmixdT=0.0, dehmixdT=0.0; for (int k = 0; k < 19; ++k) { cmix += c_R[k]*sc[k]; dcmixdT += dcRdT[k]*sc[k]; ehmix += eh_RT[k]*wdot[k]; dehmixdT += invT*(c_R[k]-eh_RT[k])*wdot[k] + eh_RT[k]*J[380+k]; } double cmixinv = 1.0/cmix; double tmp1 = ehmix*cmixinv; double tmp3 = cmixinv*T; double tmp2 = tmp1*tmp3; double dehmixdc; /* dTdot/d[X] */ for (int k = 0; k < 19; ++k) { dehmixdc = 0.0; for (int m = 0; m < 19; ++m) { dehmixdc += eh_RT[m]*J[k*20+m]; } J[k*20+19] = tmp2*c_R[k] - tmp3*dehmixdc; } /* dTdot/dT */ J[399] = -tmp1 + tmp2*dcmixdT - tmp3*dehmixdT; } /*compute d(Cp/R)/dT and d(Cv/R)/dT at the given temperature */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void dcvpRdT(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = +7.98052075e-03 -3.89563020e-05 * tc[1] +6.04716282e-08 * tc[2] -2.95044704e-11 * tc[3]; /*species 1: H */ species[1] = +7.05332819e-13 -3.99183928e-15 * tc[1] +6.90244896e-18 * tc[2] -3.71092933e-21 * tc[3]; /*species 2: O2 */ species[2] = -2.99673416e-03 +1.96946040e-05 * tc[1] -2.90438853e-08 * tc[2] +1.29749135e-11 * tc[3]; /*species 3: OH */ species[3] = -2.40131752e-03 +9.23587682e-06 * tc[1] -1.16434000e-08 * tc[2] +5.45645880e-12 * tc[3]; /*species 4: H2O */ species[4] = -2.03643410e-03 +1.30408042e-05 * tc[1] -1.64639119e-08 * tc[2] +7.08791268e-12 * tc[3]; /*species 5: HO2 */ species[5] = -4.74912051e-03 +4.23165782e-05 * tc[1] -7.28291682e-08 * 
tc[2] +3.71690050e-11 * tc[3]; /*species 6: H2O2 */ species[6] = -5.42822417e-04 +3.34671402e-05 * tc[1] -6.47312439e-08 * tc[2] +3.44981745e-11 * tc[3]; /*species 7: CH3 */ species[7] = +2.01095175e-03 +1.14604371e-05 * tc[1] -2.06135228e-08 * tc[2] +1.01754294e-11 * tc[3]; /*species 8: CH4 */ species[8] = -1.36709788e-02 +9.83601198e-05 * tc[1] -1.45422908e-07 * tc[2] +6.66775824e-11 * tc[3]; /*species 9: CO */ species[9] = -6.10353680e-04 +2.03362866e-06 * tc[1] +2.72101765e-09 * tc[2] -3.61769800e-12 * tc[3]; /*species 10: CO2 */ species[10] = +8.98459677e-03 -1.42471254e-05 * tc[1] +7.37757066e-09 * tc[2] -5.74798192e-13 * tc[3]; /*species 11: CH2O */ species[11] = -9.90833369e-03 +7.46440016e-05 * tc[1] -1.13785578e-07 * tc[2] +5.27090608e-11 * tc[3]; /*species 12: C2H2 */ species[12] = +2.33615629e-02 -7.10343630e-05 * tc[1] +8.40457311e-08 * tc[2] -3.40029190e-11 * tc[3]; /*species 13: C2H4 */ species[13] = -7.57052247e-03 +1.14198058e-04 * tc[1] -2.07476626e-07 * tc[2] +1.07953749e-10 * tc[3]; /*species 14: C2H6 */ species[14] = -5.50154270e-03 +1.19887658e-04 * tc[1] -2.12539886e-07 * tc[2] +1.07474308e-10 * tc[3]; /*species 15: NH3 */ species[15] = -4.66052300e-03 +4.34370260e-05 * tc[1] -6.84266610e-08 * tc[2] +3.30552184e-11 * tc[3]; /*species 16: NO */ species[16] = -4.63897600e-03 +2.20820440e-05 * tc[1] -2.80084062e-08 * tc[2] +1.12143080e-11 * tc[3]; /*species 17: HCN */ species[17] = +1.00511700e-02 -2.67035260e-05 * tc[1] +3.02770470e-08 * tc[2] -1.20356112e-11 * tc[3]; /*species 18: N2 */ species[18] = +1.40824040e-03 -7.92644400e-06 * tc[1] +1.69245450e-08 * tc[2] -9.77941600e-12 * tc[3]; } else { /*species 0: H2 */ species[0] = -4.94024731e-05 +9.98913556e-07 * tc[1] -5.38699182e-10 * tc[2] +8.01021504e-14 * tc[3]; /*species 1: H */ species[1] = -2.30842973e-11 +3.23123896e-14 * tc[1] -1.42054571e-17 * tc[2] +1.99278943e-21 * tc[3]; /*species 2: O2 */ species[2] = +1.48308754e-03 -1.51593334e-06 * tc[1] +6.28411665e-10 * tc[2] -8.66871176e-14 
* tc[3]; /*species 3: OH */ species[3] = +5.48429716e-04 +2.53010456e-07 * tc[1] -2.63838467e-10 * tc[2] +4.69649504e-14 * tc[3]; /*species 4: H2O */ species[4] = +2.17691804e-03 -3.28145036e-07 * tc[1] -2.91125961e-10 * tc[2] +6.72803968e-14 * tc[3]; /*species 5: HO2 */ species[5] = +2.23982013e-03 -1.26731630e-06 * tc[1] +3.42739110e-10 * tc[2] -4.31634140e-14 * tc[3]; /*species 6: H2O2 */ species[6] = +4.90831694e-03 -3.80278450e-06 * tc[1] +1.11355796e-09 * tc[2] -1.15163322e-13 * tc[3]; /*species 7: CH3 */ species[7] = +7.23990037e-03 -5.97428696e-06 * tc[1] +1.78705393e-09 * tc[2] -1.86861758e-13 * tc[3]; /*species 8: CH4 */ species[8] = +1.33909467e-02 -1.14657162e-05 * tc[1] +3.66877605e-09 * tc[2] -4.07260920e-13 * tc[3]; /*species 9: CO */ species[9] = +2.06252743e-03 -1.99765154e-06 * tc[1] +6.90159024e-10 * tc[2] -8.14590864e-14 * tc[3]; /*species 10: CO2 */ species[10] = +4.41437026e-03 -4.42962808e-06 * tc[1] +1.57047056e-09 * tc[2] -1.88833666e-13 * tc[3]; /*species 11: CH2O */ species[11] = +9.20000082e-03 -8.84517626e-06 * tc[1] +3.01923636e-09 * tc[2] -3.53542256e-13 * tc[3]; /*species 12: C2H2 */ species[12] = +5.96166664e-03 -4.74589704e-06 * tc[1] +1.40223651e-09 * tc[2] -1.44494085e-13 * tc[3]; /*species 13: C2H4 */ species[13] = +1.46454151e-02 -1.34215583e-05 * tc[1] +4.41668769e-09 * tc[2] -5.02824244e-13 * tc[3]; /*species 14: C2H6 */ species[14] = +2.16852677e-02 -2.00512134e-05 * tc[1] +6.64236003e-09 * tc[2] -7.60011560e-13 * tc[3]; /*species 15: NH3 */ species[15] = +5.66625600e-03 -3.45573520e-06 * tc[1] +7.16014830e-10 * tc[2] -5.03151440e-14 * tc[3]; /*species 16: NO */ species[16] = +1.19110430e-03 -8.58340960e-07 * tc[1] +2.08373007e-10 * tc[2] -1.61344396e-14 * tc[3]; /*species 17: HCN */ species[17] = +3.14642280e-03 -2.12643700e-06 * tc[1] +4.98592710e-10 * tc[2] -3.91990280e-14 * tc[3]; /*species 18: N2 */ species[18] = +1.48797680e-03 -1.13695200e-06 * tc[1] +3.02911140e-10 * tc[2] -2.70134040e-14 * tc[3]; } return; } 
/*compute the progress rate for each reaction */ void progressRate(double * restrict qdot, double * restrict sc, double T) { double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */ double invT = 1.0 / tc[1]; if (T != T_save) { T_save = T; comp_k_f(tc,invT,k_f_save); comp_Kc(tc,invT,Kc_save); } double q_f[0], q_r[0]; comp_qfqr(q_f, q_r, sc, tc, invT); for (int i = 0; i < 0; ++i) { qdot[i] = q_f[i] - q_r[i]; } return; } /*compute the progress rate for each reaction */ void progressRateFR(double * restrict q_f, double * restrict q_r, double * restrict sc, double T) { double tc[] = { log(T), T, T*T, T*T*T, T*T*T*T }; /*temperature cache */ double invT = 1.0 / tc[1]; if (T != T_save) { T_save = T; comp_k_f(tc,invT,k_f_save); comp_Kc(tc,invT,Kc_save); } comp_qfqr(q_f, q_r, sc, tc, invT); return; } /*compute the equilibrium constants for each reaction */ void equilibriumConstants(double * restrict kc, double * restrict g_RT, double T) { /*reference concentration: P_atm / (RT) in inverse mol/m^3 */ double refC = 101325 / 8.31451 / T; return; } /*compute the g/(RT) at the given temperature */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void gibbs(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; double invT = 1 / T; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = -9.179351730000000e+02 * invT +1.661320882000000e+00 -2.344331120000000e+00 * tc[0] -3.990260375000000e-03 * tc[1] +3.246358500000000e-06 * tc[2] -1.679767450000000e-09 * tc[3] +3.688058805000000e-13 * tc[4]; /*species 1: H */ species[1] = +2.547365990000000e+04 * invT +2.946682853000000e+00 -2.500000000000000e+00 * tc[0] -3.526664095000000e-13 * tc[1] +3.326532733333333e-16 * tc[2] -1.917346933333333e-19 * tc[3] +4.638661660000000e-23 * tc[4]; /*species 2: O2 */ species[2] = -1.063943560000000e+03 * invT +1.247806300000001e-01 -3.782456360000000e+00 * tc[0] +1.498367080000000e-03 * tc[1] 
-1.641217001666667e-06 * tc[2] +8.067745908333334e-10 * tc[3] -1.621864185000000e-13 * tc[4]; /*species 3: OH */ species[3] = +3.615080560000000e+03 * invT +4.095940888000000e+00 -3.992015430000000e+00 * tc[0] +1.200658760000000e-03 * tc[1] -7.696564016666666e-07 * tc[2] +3.234277775000000e-10 * tc[3] -6.820573500000000e-14 * tc[4]; /*species 4: H2O */ species[4] = -3.029372670000000e+04 * invT +5.047672768000000e+00 -4.198640560000000e+00 * tc[0] +1.018217050000000e-03 * tc[1] -1.086733685000000e-06 * tc[2] +4.573308850000000e-10 * tc[3] -8.859890850000000e-14 * tc[4]; /*species 5: HO2 */ species[5] = +2.948080400000000e+02 * invT +5.851355599999999e-01 -4.301798010000000e+00 * tc[0] +2.374560255000000e-03 * tc[1] -3.526381516666666e-06 * tc[2] +2.023032450000000e-09 * tc[3] -4.646125620000001e-13 * tc[4]; /*species 6: H2O2 */ species[6] = -1.770258210000000e+04 * invT +8.410619499999998e-01 -4.276112690000000e+00 * tc[0] +2.714112085000000e-04 * tc[1] -2.788928350000000e-06 * tc[2] +1.798090108333333e-09 * tc[3] -4.312271815000000e-13 * tc[4]; /*species 7: CH3 */ species[7] = +1.644499880000000e+04 * invT +2.069026070000000e+00 -3.673590400000000e+00 * tc[0] -1.005475875000000e-03 * tc[1] -9.550364266666668e-07 * tc[2] +5.725978541666666e-10 * tc[3] -1.271928670000000e-13 * tc[4]; /*species 8: CH4 */ species[8] = -1.024664760000000e+04 * invT +9.791179889999999e+00 -5.149876130000000e+00 * tc[0] +6.835489400000000e-03 * tc[1] -8.196676650000000e-06 * tc[2] +4.039525216666667e-09 * tc[3] -8.334697800000000e-13 * tc[4]; /*species 9: CO */ species[9] = -1.434408600000000e+04 * invT +7.112418999999992e-02 -3.579533470000000e+00 * tc[0] +3.051768400000000e-04 * tc[1] -1.694690550000000e-07 * tc[2] -7.558382366666667e-11 * tc[3] +4.522122495000000e-14 * tc[4]; /*species 10: CO2 */ species[10] = -4.837196970000000e+04 * invT -7.544278700000000e+00 -2.356773520000000e+00 * tc[0] -4.492298385000000e-03 * tc[1] +1.187260448333333e-06 * tc[2] -2.049325183333333e-10 * tc[3] 
+7.184977399999999e-15 * tc[4]; /*species 11: CH2O */ species[11] = -1.430895670000000e+04 * invT +4.190910250000000e+00 -4.793723150000000e+00 * tc[0] +4.954166845000000e-03 * tc[1] -6.220333466666666e-06 * tc[2] +3.160710508333333e-09 * tc[3] -6.588632600000000e-13 * tc[4]; /*species 12: C2H2 */ species[12] = +2.642898070000000e+04 * invT -1.313102400600000e+01 -8.086810940000000e-01 * tc[0] -1.168078145000000e-02 * tc[1] +5.919530250000000e-06 * tc[2] -2.334603641666667e-09 * tc[3] +4.250364870000000e-13 * tc[4]; /*species 13: C2H4 */ species[13] = +5.089775930000000e+03 * invT -1.381294799999999e-01 -3.959201480000000e+00 * tc[0] +3.785261235000000e-03 * tc[1] -9.516504866666667e-06 * tc[2] +5.763239608333333e-09 * tc[3] -1.349421865000000e-12 * tc[4]; /*species 14: C2H6 */ species[14] = -1.152220550000000e+04 * invT +1.624601760000000e+00 -4.291424920000000e+00 * tc[0] +2.750771350000000e-03 * tc[1] -9.990638133333334e-06 * tc[2] +5.903885708333334e-09 * tc[3] -1.343428855000000e-12 * tc[4]; /*species 15: NH3 */ species[15] = -6.741728500000000e+03 * invT +4.911400170000000e+00 -4.286027400000000e+00 * tc[0] +2.330261500000000e-03 * tc[1] -3.619752166666667e-06 * tc[2] +1.900740583333333e-09 * tc[3] -4.131902300000000e-13 * tc[4]; /*species 16: NO */ species[16] = +9.844623000000000e+03 * invT +1.937629900000000e+00 -4.218476300000000e+00 * tc[0] +2.319488000000000e-03 * tc[1] -1.840170333333333e-06 * tc[2] +7.780112833333333e-10 * tc[3] -1.401788500000000e-13 * tc[4]; /*species 17: HCN */ species[17] = +1.471263300000000e+04 * invT -6.657453300000000e+00 -2.258988600000000e+00 * tc[0] -5.025585000000000e-03 * tc[1] +2.225293833333333e-06 * tc[2] -8.410290833333334e-10 * tc[3] +1.504451400000000e-13 * tc[4]; /*species 18: N2 */ species[18] = -1.020899900000000e+03 * invT -6.516950000000001e-01 -3.298677000000000e+00 * tc[0] -7.041202000000000e-04 * tc[1] +6.605369999999999e-07 * tc[2] -4.701262500000001e-10 * tc[3] +1.222427000000000e-13 * tc[4]; } else { 
/*species 0: H2 */ species[0] = -9.501589220000000e+02 * invT +6.542302510000000e+00 -3.337279200000000e+00 * tc[0] +2.470123655000000e-05 * tc[1] -8.324279633333333e-08 * tc[2] +1.496386616666667e-11 * tc[3] -1.001276880000000e-15 * tc[4]; /*species 1: H */ species[1] = +2.547365990000000e+04 * invT +2.946682924000000e+00 -2.500000010000000e+00 * tc[0] +1.154214865000000e-11 * tc[1] -2.692699133333334e-15 * tc[2] +3.945960291666667e-19 * tc[3] -2.490986785000000e-23 * tc[4]; /*species 2: O2 */ species[2] = -1.088457720000000e+03 * invT -2.170693450000000e+00 -3.282537840000000e+00 * tc[0] -7.415437700000000e-04 * tc[1] +1.263277781666667e-07 * tc[2] -1.745587958333333e-11 * tc[3] +1.083588970000000e-15 * tc[4]; /*species 3: OH */ species[3] = +3.858657000000000e+03 * invT -1.383808430000000e+00 -3.092887670000000e+00 * tc[0] -2.742148580000000e-04 * tc[1] -2.108420466666667e-08 * tc[2] +7.328846300000000e-12 * tc[3] -5.870618800000000e-16 * tc[4]; /*species 4: H2O */ species[4] = -3.000429710000000e+04 * invT -1.932777610000000e+00 -3.033992490000000e+00 * tc[0] -1.088459020000000e-03 * tc[1] +2.734541966666666e-08 * tc[2] +8.086832250000000e-12 * tc[3] -8.410049600000000e-16 * tc[4]; /*species 5: HO2 */ species[5] = +1.118567130000000e+02 * invT +2.321087500000001e-01 -4.017210900000000e+00 * tc[0] -1.119910065000000e-03 * tc[1] +1.056096916666667e-07 * tc[2] -9.520530833333334e-12 * tc[3] +5.395426750000000e-16 * tc[4]; /*species 6: H2O2 */ species[6] = -1.786178770000000e+04 * invT +1.248846229999999e+00 -4.165002850000000e+00 * tc[0] -2.454158470000000e-03 * tc[1] +3.168987083333333e-07 * tc[2] -3.093216550000000e-11 * tc[3] +1.439541525000000e-15 * tc[4]; /*species 7: CH3 */ species[7] = +1.677558430000000e+04 * invT -6.194354070000000e+00 -2.285717720000000e+00 * tc[0] -3.619950185000000e-03 * tc[1] +4.978572466666667e-07 * tc[2] -4.964038700000000e-11 * tc[3] +2.335771970000000e-15 * tc[4]; /*species 8: CH4 */ species[8] = -9.468344590000001e+03 * invT 
-1.836246650500000e+01 -7.485149500000000e-02 * tc[0] -6.695473350000000e-03 * tc[1] +9.554763483333333e-07 * tc[2] -1.019104458333333e-10 * tc[3] +5.090761500000000e-15 * tc[4]; /*species 9: CO */ species[9] = -1.415187240000000e+04 * invT -5.103502110000000e+00 -2.715185610000000e+00 * tc[0] -1.031263715000000e-03 * tc[1] +1.664709618333334e-07 * tc[2] -1.917108400000000e-11 * tc[3] +1.018238580000000e-15 * tc[4]; /*species 10: CO2 */ species[10] = -4.875916600000000e+04 * invT +1.585822230000000e+00 -3.857460290000000e+00 * tc[0] -2.207185130000000e-03 * tc[1] +3.691356733333334e-07 * tc[2] -4.362418233333334e-11 * tc[3] +2.360420820000000e-15 * tc[4]; /*species 11: CH2O */ species[11] = -1.399583230000000e+04 * invT -1.189563292000000e+01 -1.760690080000000e+00 * tc[0] -4.600000410000000e-03 * tc[1] +7.370980216666666e-07 * tc[2] -8.386767666666666e-11 * tc[3] +4.419278200000001e-15 * tc[4]; /*species 12: C2H2 */ species[12] = +2.593599920000000e+04 * invT +5.377850850000001e+00 -4.147569640000000e+00 * tc[0] -2.980833320000000e-03 * tc[1] +3.954914200000000e-07 * tc[2] -3.895101425000000e-11 * tc[3] +1.806176065000000e-15 * tc[4]; /*species 13: C2H4 */ species[13] = +4.939886140000000e+03 * invT -8.269258140000002e+00 -2.036111160000000e+00 * tc[0] -7.322707550000000e-03 * tc[1] +1.118463191666667e-06 * tc[2] -1.226857691666667e-10 * tc[3] +6.285303050000000e-15 * tc[4]; /*species 14: C2H6 */ species[14] = -1.142639320000000e+04 * invT -1.404372920000000e+01 -1.071881500000000e+00 * tc[0] -1.084263385000000e-02 * tc[1] +1.670934450000000e-06 * tc[2] -1.845100008333333e-10 * tc[3] +9.500144500000000e-15 * tc[4]; /*species 15: NH3 */ species[15] = -6.544695800000000e+03 * invT -3.931840700000000e+00 -2.634452100000000e+00 * tc[0] -2.833128000000000e-03 * tc[1] +2.879779333333333e-07 * tc[2] -1.988930083333333e-11 * tc[3] +6.289392999999999e-16 * tc[4]; /*species 16: NO */ species[16] = +9.920974600000000e+03 * invT -3.108697100000001e+00 -3.260605600000000e+00 * 
tc[0] -5.955521500000000e-04 * tc[1] +7.152841333333333e-08 * tc[2] -5.788139083333334e-12 * tc[3] +2.016804950000000e-16 * tc[4]; /*species 17: HCN */ species[17] = +1.440729200000000e+04 * invT +2.226779100000000e+00 -3.802239200000000e+00 * tc[0] -1.573211400000000e-03 * tc[1] +1.772030833333333e-07 * tc[2] -1.384979750000000e-11 * tc[3] +4.899878500000000e-16 * tc[4]; /*species 18: N2 */ species[18] = -9.227977000000000e+02 * invT -3.053888000000000e+00 -2.926640000000000e+00 * tc[0] -7.439884000000000e-04 * tc[1] +9.474600000000001e-08 * tc[2] -8.414198333333333e-12 * tc[3] +3.376675500000000e-16 * tc[4]; } return; } /*compute the a/(RT) at the given temperature */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void helmholtz(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; double invT = 1 / T; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = -9.17935173e+02 * invT +6.61320882e-01 -2.34433112e+00 * tc[0] -3.99026037e-03 * tc[1] +3.24635850e-06 * tc[2] -1.67976745e-09 * tc[3] +3.68805881e-13 * tc[4]; /*species 1: H */ species[1] = +2.54736599e+04 * invT +1.94668285e+00 -2.50000000e+00 * tc[0] -3.52666409e-13 * tc[1] +3.32653273e-16 * tc[2] -1.91734693e-19 * tc[3] +4.63866166e-23 * tc[4]; /*species 2: O2 */ species[2] = -1.06394356e+03 * invT -8.75219370e-01 -3.78245636e+00 * tc[0] +1.49836708e-03 * tc[1] -1.64121700e-06 * tc[2] +8.06774591e-10 * tc[3] -1.62186418e-13 * tc[4]; /*species 3: OH */ species[3] = +3.61508056e+03 * invT +3.09594089e+00 -3.99201543e+00 * tc[0] +1.20065876e-03 * tc[1] -7.69656402e-07 * tc[2] +3.23427778e-10 * tc[3] -6.82057350e-14 * tc[4]; /*species 4: H2O */ species[4] = -3.02937267e+04 * invT +4.04767277e+00 -4.19864056e+00 * tc[0] +1.01821705e-03 * tc[1] -1.08673369e-06 * tc[2] +4.57330885e-10 * tc[3] -8.85989085e-14 * tc[4]; /*species 5: HO2 */ species[5] = +2.94808040e+02 * invT -4.14864440e-01 -4.30179801e+00 * tc[0] +2.37456025e-03 * tc[1] 
-3.52638152e-06 * tc[2] +2.02303245e-09 * tc[3] -4.64612562e-13 * tc[4]; /*species 6: H2O2 */ species[6] = -1.77025821e+04 * invT -1.58938050e-01 -4.27611269e+00 * tc[0] +2.71411208e-04 * tc[1] -2.78892835e-06 * tc[2] +1.79809011e-09 * tc[3] -4.31227182e-13 * tc[4]; /*species 7: CH3 */ species[7] = +1.64449988e+04 * invT +1.06902607e+00 -3.67359040e+00 * tc[0] -1.00547588e-03 * tc[1] -9.55036427e-07 * tc[2] +5.72597854e-10 * tc[3] -1.27192867e-13 * tc[4]; /*species 8: CH4 */ species[8] = -1.02466476e+04 * invT +8.79117989e+00 -5.14987613e+00 * tc[0] +6.83548940e-03 * tc[1] -8.19667665e-06 * tc[2] +4.03952522e-09 * tc[3] -8.33469780e-13 * tc[4]; /*species 9: CO */ species[9] = -1.43440860e+04 * invT -9.28875810e-01 -3.57953347e+00 * tc[0] +3.05176840e-04 * tc[1] -1.69469055e-07 * tc[2] -7.55838237e-11 * tc[3] +4.52212249e-14 * tc[4]; /*species 10: CO2 */ species[10] = -4.83719697e+04 * invT -8.54427870e+00 -2.35677352e+00 * tc[0] -4.49229839e-03 * tc[1] +1.18726045e-06 * tc[2] -2.04932518e-10 * tc[3] +7.18497740e-15 * tc[4]; /*species 11: CH2O */ species[11] = -1.43089567e+04 * invT +3.19091025e+00 -4.79372315e+00 * tc[0] +4.95416684e-03 * tc[1] -6.22033347e-06 * tc[2] +3.16071051e-09 * tc[3] -6.58863260e-13 * tc[4]; /*species 12: C2H2 */ species[12] = +2.64289807e+04 * invT -1.41310240e+01 -8.08681094e-01 * tc[0] -1.16807815e-02 * tc[1] +5.91953025e-06 * tc[2] -2.33460364e-09 * tc[3] +4.25036487e-13 * tc[4]; /*species 13: C2H4 */ species[13] = +5.08977593e+03 * invT -1.13812948e+00 -3.95920148e+00 * tc[0] +3.78526124e-03 * tc[1] -9.51650487e-06 * tc[2] +5.76323961e-09 * tc[3] -1.34942187e-12 * tc[4]; /*species 14: C2H6 */ species[14] = -1.15222055e+04 * invT +6.24601760e-01 -4.29142492e+00 * tc[0] +2.75077135e-03 * tc[1] -9.99063813e-06 * tc[2] +5.90388571e-09 * tc[3] -1.34342886e-12 * tc[4]; /*species 15: NH3 */ species[15] = -6.74172850e+03 * invT +3.91140017e+00 -4.28602740e+00 * tc[0] +2.33026150e-03 * tc[1] -3.61975217e-06 * tc[2] +1.90074058e-09 * tc[3] 
-4.13190230e-13 * tc[4]; /*species 16: NO */ species[16] = +9.84462300e+03 * invT +9.37629900e-01 -4.21847630e+00 * tc[0] +2.31948800e-03 * tc[1] -1.84017033e-06 * tc[2] +7.78011283e-10 * tc[3] -1.40178850e-13 * tc[4]; /*species 17: HCN */ species[17] = +1.47126330e+04 * invT -7.65745330e+00 -2.25898860e+00 * tc[0] -5.02558500e-03 * tc[1] +2.22529383e-06 * tc[2] -8.41029083e-10 * tc[3] +1.50445140e-13 * tc[4]; /*species 18: N2 */ species[18] = -1.02089990e+03 * invT -1.65169500e+00 -3.29867700e+00 * tc[0] -7.04120200e-04 * tc[1] +6.60537000e-07 * tc[2] -4.70126250e-10 * tc[3] +1.22242700e-13 * tc[4]; } else { /*species 0: H2 */ species[0] = -9.50158922e+02 * invT +5.54230251e+00 -3.33727920e+00 * tc[0] +2.47012365e-05 * tc[1] -8.32427963e-08 * tc[2] +1.49638662e-11 * tc[3] -1.00127688e-15 * tc[4]; /*species 1: H */ species[1] = +2.54736599e+04 * invT +1.94668292e+00 -2.50000001e+00 * tc[0] +1.15421486e-11 * tc[1] -2.69269913e-15 * tc[2] +3.94596029e-19 * tc[3] -2.49098679e-23 * tc[4]; /*species 2: O2 */ species[2] = -1.08845772e+03 * invT -3.17069345e+00 -3.28253784e+00 * tc[0] -7.41543770e-04 * tc[1] +1.26327778e-07 * tc[2] -1.74558796e-11 * tc[3] +1.08358897e-15 * tc[4]; /*species 3: OH */ species[3] = +3.85865700e+03 * invT -2.38380843e+00 -3.09288767e+00 * tc[0] -2.74214858e-04 * tc[1] -2.10842047e-08 * tc[2] +7.32884630e-12 * tc[3] -5.87061880e-16 * tc[4]; /*species 4: H2O */ species[4] = -3.00042971e+04 * invT -2.93277761e+00 -3.03399249e+00 * tc[0] -1.08845902e-03 * tc[1] +2.73454197e-08 * tc[2] +8.08683225e-12 * tc[3] -8.41004960e-16 * tc[4]; /*species 5: HO2 */ species[5] = +1.11856713e+02 * invT -7.67891250e-01 -4.01721090e+00 * tc[0] -1.11991006e-03 * tc[1] +1.05609692e-07 * tc[2] -9.52053083e-12 * tc[3] +5.39542675e-16 * tc[4]; /*species 6: H2O2 */ species[6] = -1.78617877e+04 * invT +2.48846230e-01 -4.16500285e+00 * tc[0] -2.45415847e-03 * tc[1] +3.16898708e-07 * tc[2] -3.09321655e-11 * tc[3] +1.43954153e-15 * tc[4]; /*species 7: CH3 */ species[7] = 
+1.67755843e+04 * invT -7.19435407e+00 -2.28571772e+00 * tc[0] -3.61995018e-03 * tc[1] +4.97857247e-07 * tc[2] -4.96403870e-11 * tc[3] +2.33577197e-15 * tc[4]; /*species 8: CH4 */ species[8] = -9.46834459e+03 * invT -1.93624665e+01 -7.48514950e-02 * tc[0] -6.69547335e-03 * tc[1] +9.55476348e-07 * tc[2] -1.01910446e-10 * tc[3] +5.09076150e-15 * tc[4]; /*species 9: CO */ species[9] = -1.41518724e+04 * invT -6.10350211e+00 -2.71518561e+00 * tc[0] -1.03126372e-03 * tc[1] +1.66470962e-07 * tc[2] -1.91710840e-11 * tc[3] +1.01823858e-15 * tc[4]; /*species 10: CO2 */ species[10] = -4.87591660e+04 * invT +5.85822230e-01 -3.85746029e+00 * tc[0] -2.20718513e-03 * tc[1] +3.69135673e-07 * tc[2] -4.36241823e-11 * tc[3] +2.36042082e-15 * tc[4]; /*species 11: CH2O */ species[11] = -1.39958323e+04 * invT -1.28956329e+01 -1.76069008e+00 * tc[0] -4.60000041e-03 * tc[1] +7.37098022e-07 * tc[2] -8.38676767e-11 * tc[3] +4.41927820e-15 * tc[4]; /*species 12: C2H2 */ species[12] = +2.59359992e+04 * invT +4.37785085e+00 -4.14756964e+00 * tc[0] -2.98083332e-03 * tc[1] +3.95491420e-07 * tc[2] -3.89510143e-11 * tc[3] +1.80617607e-15 * tc[4]; /*species 13: C2H4 */ species[13] = +4.93988614e+03 * invT -9.26925814e+00 -2.03611116e+00 * tc[0] -7.32270755e-03 * tc[1] +1.11846319e-06 * tc[2] -1.22685769e-10 * tc[3] +6.28530305e-15 * tc[4]; /*species 14: C2H6 */ species[14] = -1.14263932e+04 * invT -1.50437292e+01 -1.07188150e+00 * tc[0] -1.08426339e-02 * tc[1] +1.67093445e-06 * tc[2] -1.84510001e-10 * tc[3] +9.50014450e-15 * tc[4]; /*species 15: NH3 */ species[15] = -6.54469580e+03 * invT -4.93184070e+00 -2.63445210e+00 * tc[0] -2.83312800e-03 * tc[1] +2.87977933e-07 * tc[2] -1.98893008e-11 * tc[3] +6.28939300e-16 * tc[4]; /*species 16: NO */ species[16] = +9.92097460e+03 * invT -4.10869710e+00 -3.26060560e+00 * tc[0] -5.95552150e-04 * tc[1] +7.15284133e-08 * tc[2] -5.78813908e-12 * tc[3] +2.01680495e-16 * tc[4]; /*species 17: HCN */ species[17] = +1.44072920e+04 * invT +1.22677910e+00 
-3.80223920e+00 * tc[0] -1.57321140e-03 * tc[1] +1.77203083e-07 * tc[2] -1.38497975e-11 * tc[3] +4.89987850e-16 * tc[4]; /*species 18: N2 */ species[18] = -9.22797700e+02 * invT -4.05388800e+00 -2.92664000e+00 * tc[0] -7.43988400e-04 * tc[1] +9.47460000e-08 * tc[2] -8.41419833e-12 * tc[3] +3.37667550e-16 * tc[4]; } return; }
/* NOTE(review): cv_R fills species[0..18] with Cv/R for each of the 19 species using
   auto-generated NASA 7-coefficient polynomial fits (two temperature ranges, 1000 K midpoint).
   Cv/R = Cp/R - 1: the leading constants below are exactly cp_R's minus 1, all higher-order
   coefficients are identical. tc[1] holds T; tc[2..4] are presumably T^2..T^4 per the
   "precomputed powers of T" convention -- TODO confirm with the tc[] setup routine.
   Machine-generated coefficient table: do not hand-edit the numbers. */
/*compute Cv/R at the given temperature */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void cv_R(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = +1.34433112e+00 +7.98052075e-03 * tc[1] -1.94781510e-05 * tc[2] +2.01572094e-08 * tc[3] -7.37611761e-12 * tc[4]; /*species 1: H */ species[1] = +1.50000000e+00 +7.05332819e-13 * tc[1] -1.99591964e-15 * tc[2] +2.30081632e-18 * tc[3] -9.27732332e-22 * tc[4]; /*species 2: O2 */ species[2] = +2.78245636e+00 -2.99673416e-03 * tc[1] +9.84730201e-06 * tc[2] -9.68129509e-09 * tc[3] +3.24372837e-12 * tc[4]; /*species 3: OH */ species[3] = +2.99201543e+00 -2.40131752e-03 * tc[1] +4.61793841e-06 * tc[2] -3.88113333e-09 * tc[3] +1.36411470e-12 * tc[4]; /*species 4: H2O */ species[4] = +3.19864056e+00 -2.03643410e-03 * tc[1] +6.52040211e-06 * tc[2] -5.48797062e-09 * tc[3] +1.77197817e-12 * tc[4]; /*species 5: HO2 */ species[5] = +3.30179801e+00 -4.74912051e-03 * tc[1] +2.11582891e-05 * tc[2] -2.42763894e-08 * tc[3] +9.29225124e-12 * tc[4]; /*species 6: H2O2 */ species[6] = +3.27611269e+00 -5.42822417e-04 * tc[1] +1.67335701e-05 * tc[2] -2.15770813e-08 * tc[3] +8.62454363e-12 * tc[4]; /*species 7: CH3 */ species[7] = +2.67359040e+00 +2.01095175e-03 * tc[1] +5.73021856e-06 * tc[2] -6.87117425e-09 * tc[3] +2.54385734e-12 * tc[4]; /*species 8: CH4 */ species[8] = +4.14987613e+00 -1.36709788e-02 * tc[1] +4.91800599e-05 * tc[2] -4.84743026e-08 * tc[3] +1.66693956e-11 * tc[4]; /*species 9: CO */ species[9] = +2.57953347e+00 -6.10353680e-04 * tc[1] +1.01681433e-06 * tc[2] 
+9.07005884e-10 * tc[3] -9.04424499e-13 * tc[4]; /*species 10: CO2 */ species[10] = +1.35677352e+00 +8.98459677e-03 * tc[1] -7.12356269e-06 * tc[2] +2.45919022e-09 * tc[3] -1.43699548e-13 * tc[4]; /*species 11: CH2O */ species[11] = +3.79372315e+00 -9.90833369e-03 * tc[1] +3.73220008e-05 * tc[2] -3.79285261e-08 * tc[3] +1.31772652e-11 * tc[4]; /*species 12: C2H2 */ species[12] = -1.91318906e-01 +2.33615629e-02 * tc[1] -3.55171815e-05 * tc[2] +2.80152437e-08 * tc[3] -8.50072974e-12 * tc[4]; /*species 13: C2H4 */ species[13] = +2.95920148e+00 -7.57052247e-03 * tc[1] +5.70990292e-05 * tc[2] -6.91588753e-08 * tc[3] +2.69884373e-11 * tc[4]; /*species 14: C2H6 */ species[14] = +3.29142492e+00 -5.50154270e-03 * tc[1] +5.99438288e-05 * tc[2] -7.08466285e-08 * tc[3] +2.68685771e-11 * tc[4]; /*species 15: NH3 */ species[15] = +3.28602740e+00 -4.66052300e-03 * tc[1] +2.17185130e-05 * tc[2] -2.28088870e-08 * tc[3] +8.26380460e-12 * tc[4]; /*species 16: NO */ species[16] = +3.21847630e+00 -4.63897600e-03 * tc[1] +1.10410220e-05 * tc[2] -9.33613540e-09 * tc[3] +2.80357700e-12 * tc[4]; /*species 17: HCN */ species[17] = +1.25898860e+00 +1.00511700e-02 * tc[1] -1.33517630e-05 * tc[2] +1.00923490e-08 * tc[3] -3.00890280e-12 * tc[4]; /*species 18: N2 */ species[18] = +2.29867700e+00 +1.40824040e-03 * tc[1] -3.96322200e-06 * tc[2] +5.64151500e-09 * tc[3] -2.44485400e-12 * tc[4]; } else { /*species 0: H2 */ species[0] = +2.33727920e+00 -4.94024731e-05 * tc[1] +4.99456778e-07 * tc[2] -1.79566394e-10 * tc[3] +2.00255376e-14 * tc[4]; /*species 1: H */ species[1] = +1.50000001e+00 -2.30842973e-11 * tc[1] +1.61561948e-14 * tc[2] -4.73515235e-18 * tc[3] +4.98197357e-22 * tc[4]; /*species 2: O2 */ species[2] = +2.28253784e+00 +1.48308754e-03 * tc[1] -7.57966669e-07 * tc[2] +2.09470555e-10 * tc[3] -2.16717794e-14 * tc[4]; /*species 3: OH */ species[3] = +2.09288767e+00 +5.48429716e-04 * tc[1] +1.26505228e-07 * tc[2] -8.79461556e-11 * tc[3] +1.17412376e-14 * tc[4]; /*species 4: H2O */ 
species[4] = +2.03399249e+00 +2.17691804e-03 * tc[1] -1.64072518e-07 * tc[2] -9.70419870e-11 * tc[3] +1.68200992e-14 * tc[4]; /*species 5: HO2 */ species[5] = +3.01721090e+00 +2.23982013e-03 * tc[1] -6.33658150e-07 * tc[2] +1.14246370e-10 * tc[3] -1.07908535e-14 * tc[4]; /*species 6: H2O2 */ species[6] = +3.16500285e+00 +4.90831694e-03 * tc[1] -1.90139225e-06 * tc[2] +3.71185986e-10 * tc[3] -2.87908305e-14 * tc[4]; /*species 7: CH3 */ species[7] = +1.28571772e+00 +7.23990037e-03 * tc[1] -2.98714348e-06 * tc[2] +5.95684644e-10 * tc[3] -4.67154394e-14 * tc[4]; /*species 8: CH4 */ species[8] = -9.25148505e-01 +1.33909467e-02 * tc[1] -5.73285809e-06 * tc[2] +1.22292535e-09 * tc[3] -1.01815230e-13 * tc[4]; /*species 9: CO */ species[9] = +1.71518561e+00 +2.06252743e-03 * tc[1] -9.98825771e-07 * tc[2] +2.30053008e-10 * tc[3] -2.03647716e-14 * tc[4]; /*species 10: CO2 */ species[10] = +2.85746029e+00 +4.41437026e-03 * tc[1] -2.21481404e-06 * tc[2] +5.23490188e-10 * tc[3] -4.72084164e-14 * tc[4]; /*species 11: CH2O */ species[11] = +7.60690080e-01 +9.20000082e-03 * tc[1] -4.42258813e-06 * tc[2] +1.00641212e-09 * tc[3] -8.83855640e-14 * tc[4]; /*species 12: C2H2 */ species[12] = +3.14756964e+00 +5.96166664e-03 * tc[1] -2.37294852e-06 * tc[2] +4.67412171e-10 * tc[3] -3.61235213e-14 * tc[4]; /*species 13: C2H4 */ species[13] = +1.03611116e+00 +1.46454151e-02 * tc[1] -6.71077915e-06 * tc[2] +1.47222923e-09 * tc[3] -1.25706061e-13 * tc[4]; /*species 14: C2H6 */ species[14] = +7.18815000e-02 +2.16852677e-02 * tc[1] -1.00256067e-05 * tc[2] +2.21412001e-09 * tc[3] -1.90002890e-13 * tc[4]; /*species 15: NH3 */ species[15] = +1.63445210e+00 +5.66625600e-03 * tc[1] -1.72786760e-06 * tc[2] +2.38671610e-10 * tc[3] -1.25787860e-14 * tc[4]; /*species 16: NO */ species[16] = +2.26060560e+00 +1.19110430e-03 * tc[1] -4.29170480e-07 * tc[2] +6.94576690e-11 * tc[3] -4.03360990e-15 * tc[4]; /*species 17: HCN */ species[17] = +2.80223920e+00 +3.14642280e-03 * tc[1] -1.06321850e-06 * tc[2] 
+1.66197570e-10 * tc[3] -9.79975700e-15 * tc[4]; /*species 18: N2 */ species[18] = +1.92664000e+00 +1.48797680e-03 * tc[1] -5.68476000e-07 * tc[2] +1.00970380e-10 * tc[3] -6.75335100e-15 * tc[4]; } return; }
/* NOTE(review): cp_R fills species[0..18] with Cp/R, the dimensionless constant-pressure
   heat capacity (plain polynomial in T, no invT/log terms). Same generated-table caveat
   as cv_R above. */
/*compute Cp/R at the given temperature */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void cp_R(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = +2.34433112e+00 +7.98052075e-03 * tc[1] -1.94781510e-05 * tc[2] +2.01572094e-08 * tc[3] -7.37611761e-12 * tc[4]; /*species 1: H */ species[1] = +2.50000000e+00 +7.05332819e-13 * tc[1] -1.99591964e-15 * tc[2] +2.30081632e-18 * tc[3] -9.27732332e-22 * tc[4]; /*species 2: O2 */ species[2] = +3.78245636e+00 -2.99673416e-03 * tc[1] +9.84730201e-06 * tc[2] -9.68129509e-09 * tc[3] +3.24372837e-12 * tc[4]; /*species 3: OH */ species[3] = +3.99201543e+00 -2.40131752e-03 * tc[1] +4.61793841e-06 * tc[2] -3.88113333e-09 * tc[3] +1.36411470e-12 * tc[4]; /*species 4: H2O */ species[4] = +4.19864056e+00 -2.03643410e-03 * tc[1] +6.52040211e-06 * tc[2] -5.48797062e-09 * tc[3] +1.77197817e-12 * tc[4]; /*species 5: HO2 */ species[5] = +4.30179801e+00 -4.74912051e-03 * tc[1] +2.11582891e-05 * tc[2] -2.42763894e-08 * tc[3] +9.29225124e-12 * tc[4]; /*species 6: H2O2 */ species[6] = +4.27611269e+00 -5.42822417e-04 * tc[1] +1.67335701e-05 * tc[2] -2.15770813e-08 * tc[3] +8.62454363e-12 * tc[4]; /*species 7: CH3 */ species[7] = +3.67359040e+00 +2.01095175e-03 * tc[1] +5.73021856e-06 * tc[2] -6.87117425e-09 * tc[3] +2.54385734e-12 * tc[4]; /*species 8: CH4 */ species[8] = +5.14987613e+00 -1.36709788e-02 * tc[1] +4.91800599e-05 * tc[2] -4.84743026e-08 * tc[3] +1.66693956e-11 * tc[4]; /*species 9: CO */ species[9] = +3.57953347e+00 -6.10353680e-04 * tc[1] +1.01681433e-06 * tc[2] +9.07005884e-10 * tc[3] -9.04424499e-13 * tc[4]; /*species 10: CO2 */ species[10] = +2.35677352e+00 +8.98459677e-03 * tc[1] 
-7.12356269e-06 * tc[2] +2.45919022e-09 * tc[3] -1.43699548e-13 * tc[4]; /*species 11: CH2O */ species[11] = +4.79372315e+00 -9.90833369e-03 * tc[1] +3.73220008e-05 * tc[2] -3.79285261e-08 * tc[3] +1.31772652e-11 * tc[4]; /*species 12: C2H2 */ species[12] = +8.08681094e-01 +2.33615629e-02 * tc[1] -3.55171815e-05 * tc[2] +2.80152437e-08 * tc[3] -8.50072974e-12 * tc[4]; /*species 13: C2H4 */ species[13] = +3.95920148e+00 -7.57052247e-03 * tc[1] +5.70990292e-05 * tc[2] -6.91588753e-08 * tc[3] +2.69884373e-11 * tc[4]; /*species 14: C2H6 */ species[14] = +4.29142492e+00 -5.50154270e-03 * tc[1] +5.99438288e-05 * tc[2] -7.08466285e-08 * tc[3] +2.68685771e-11 * tc[4]; /*species 15: NH3 */ species[15] = +4.28602740e+00 -4.66052300e-03 * tc[1] +2.17185130e-05 * tc[2] -2.28088870e-08 * tc[3] +8.26380460e-12 * tc[4]; /*species 16: NO */ species[16] = +4.21847630e+00 -4.63897600e-03 * tc[1] +1.10410220e-05 * tc[2] -9.33613540e-09 * tc[3] +2.80357700e-12 * tc[4]; /*species 17: HCN */ species[17] = +2.25898860e+00 +1.00511700e-02 * tc[1] -1.33517630e-05 * tc[2] +1.00923490e-08 * tc[3] -3.00890280e-12 * tc[4]; /*species 18: N2 */ species[18] = +3.29867700e+00 +1.40824040e-03 * tc[1] -3.96322200e-06 * tc[2] +5.64151500e-09 * tc[3] -2.44485400e-12 * tc[4]; } else { /*species 0: H2 */ species[0] = +3.33727920e+00 -4.94024731e-05 * tc[1] +4.99456778e-07 * tc[2] -1.79566394e-10 * tc[3] +2.00255376e-14 * tc[4]; /*species 1: H */ species[1] = +2.50000001e+00 -2.30842973e-11 * tc[1] +1.61561948e-14 * tc[2] -4.73515235e-18 * tc[3] +4.98197357e-22 * tc[4]; /*species 2: O2 */ species[2] = +3.28253784e+00 +1.48308754e-03 * tc[1] -7.57966669e-07 * tc[2] +2.09470555e-10 * tc[3] -2.16717794e-14 * tc[4]; /*species 3: OH */ species[3] = +3.09288767e+00 +5.48429716e-04 * tc[1] +1.26505228e-07 * tc[2] -8.79461556e-11 * tc[3] +1.17412376e-14 * tc[4]; /*species 4: H2O */ species[4] = +3.03399249e+00 +2.17691804e-03 * tc[1] -1.64072518e-07 * tc[2] -9.70419870e-11 * tc[3] +1.68200992e-14 * tc[4]; 
/*species 5: HO2 */ species[5] = +4.01721090e+00 +2.23982013e-03 * tc[1] -6.33658150e-07 * tc[2] +1.14246370e-10 * tc[3] -1.07908535e-14 * tc[4]; /*species 6: H2O2 */ species[6] = +4.16500285e+00 +4.90831694e-03 * tc[1] -1.90139225e-06 * tc[2] +3.71185986e-10 * tc[3] -2.87908305e-14 * tc[4]; /*species 7: CH3 */ species[7] = +2.28571772e+00 +7.23990037e-03 * tc[1] -2.98714348e-06 * tc[2] +5.95684644e-10 * tc[3] -4.67154394e-14 * tc[4]; /*species 8: CH4 */ species[8] = +7.48514950e-02 +1.33909467e-02 * tc[1] -5.73285809e-06 * tc[2] +1.22292535e-09 * tc[3] -1.01815230e-13 * tc[4]; /*species 9: CO */ species[9] = +2.71518561e+00 +2.06252743e-03 * tc[1] -9.98825771e-07 * tc[2] +2.30053008e-10 * tc[3] -2.03647716e-14 * tc[4]; /*species 10: CO2 */ species[10] = +3.85746029e+00 +4.41437026e-03 * tc[1] -2.21481404e-06 * tc[2] +5.23490188e-10 * tc[3] -4.72084164e-14 * tc[4]; /*species 11: CH2O */ species[11] = +1.76069008e+00 +9.20000082e-03 * tc[1] -4.42258813e-06 * tc[2] +1.00641212e-09 * tc[3] -8.83855640e-14 * tc[4]; /*species 12: C2H2 */ species[12] = +4.14756964e+00 +5.96166664e-03 * tc[1] -2.37294852e-06 * tc[2] +4.67412171e-10 * tc[3] -3.61235213e-14 * tc[4]; /*species 13: C2H4 */ species[13] = +2.03611116e+00 +1.46454151e-02 * tc[1] -6.71077915e-06 * tc[2] +1.47222923e-09 * tc[3] -1.25706061e-13 * tc[4]; /*species 14: C2H6 */ species[14] = +1.07188150e+00 +2.16852677e-02 * tc[1] -1.00256067e-05 * tc[2] +2.21412001e-09 * tc[3] -1.90002890e-13 * tc[4]; /*species 15: NH3 */ species[15] = +2.63445210e+00 +5.66625600e-03 * tc[1] -1.72786760e-06 * tc[2] +2.38671610e-10 * tc[3] -1.25787860e-14 * tc[4]; /*species 16: NO */ species[16] = +3.26060560e+00 +1.19110430e-03 * tc[1] -4.29170480e-07 * tc[2] +6.94576690e-11 * tc[3] -4.03360990e-15 * tc[4]; /*species 17: HCN */ species[17] = +3.80223920e+00 +3.14642280e-03 * tc[1] -1.06321850e-06 * tc[2] +1.66197570e-10 * tc[3] -9.79975700e-15 * tc[4]; /*species 18: N2 */ species[18] = +2.92664000e+00 +1.48797680e-03 * tc[1] 
-5.68476000e-07 * tc[2] +1.00970380e-10 * tc[3] -6.75335100e-15 * tc[4]; } return; }
/* NOTE(review): speciesInternalEnergy fills species[0..18] with e/(RT). Each entry is the
   corresponding speciesEnthalpy fit minus 1 (leading constant lower by exactly 1, all other
   coefficients including the a6*invT enthalpy-of-formation term identical). invT = 1/T.
   Generated coefficient table -- do not hand-edit. */
/*compute the e/(RT) at the given temperature */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void speciesInternalEnergy(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; double invT = 1 / T; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = +1.34433112e+00 +3.99026037e-03 * tc[1] -6.49271700e-06 * tc[2] +5.03930235e-09 * tc[3] -1.47522352e-12 * tc[4] -9.17935173e+02 * invT; /*species 1: H */ species[1] = +1.50000000e+00 +3.52666409e-13 * tc[1] -6.65306547e-16 * tc[2] +5.75204080e-19 * tc[3] -1.85546466e-22 * tc[4] +2.54736599e+04 * invT; /*species 2: O2 */ species[2] = +2.78245636e+00 -1.49836708e-03 * tc[1] +3.28243400e-06 * tc[2] -2.42032377e-09 * tc[3] +6.48745674e-13 * tc[4] -1.06394356e+03 * invT; /*species 3: OH */ species[3] = +2.99201543e+00 -1.20065876e-03 * tc[1] +1.53931280e-06 * tc[2] -9.70283332e-10 * tc[3] +2.72822940e-13 * tc[4] +3.61508056e+03 * invT; /*species 4: H2O */ species[4] = +3.19864056e+00 -1.01821705e-03 * tc[1] +2.17346737e-06 * tc[2] -1.37199266e-09 * tc[3] +3.54395634e-13 * tc[4] -3.02937267e+04 * invT; /*species 5: HO2 */ species[5] = +3.30179801e+00 -2.37456025e-03 * tc[1] +7.05276303e-06 * tc[2] -6.06909735e-09 * tc[3] +1.85845025e-12 * tc[4] +2.94808040e+02 * invT; /*species 6: H2O2 */ species[6] = +3.27611269e+00 -2.71411208e-04 * tc[1] +5.57785670e-06 * tc[2] -5.39427032e-09 * tc[3] +1.72490873e-12 * tc[4] -1.77025821e+04 * invT; /*species 7: CH3 */ species[7] = +2.67359040e+00 +1.00547588e-03 * tc[1] +1.91007285e-06 * tc[2] -1.71779356e-09 * tc[3] +5.08771468e-13 * tc[4] +1.64449988e+04 * invT; /*species 8: CH4 */ species[8] = +4.14987613e+00 -6.83548940e-03 * tc[1] +1.63933533e-05 * tc[2] -1.21185757e-08 * tc[3] +3.33387912e-12 * tc[4] -1.02466476e+04 * invT; /*species 9: CO */ species[9] = +2.57953347e+00 -3.05176840e-04 * tc[1] +3.38938110e-07 * tc[2] 
+2.26751471e-10 * tc[3] -1.80884900e-13 * tc[4] -1.43440860e+04 * invT; /*species 10: CO2 */ species[10] = +1.35677352e+00 +4.49229839e-03 * tc[1] -2.37452090e-06 * tc[2] +6.14797555e-10 * tc[3] -2.87399096e-14 * tc[4] -4.83719697e+04 * invT; /*species 11: CH2O */ species[11] = +3.79372315e+00 -4.95416684e-03 * tc[1] +1.24406669e-05 * tc[2] -9.48213152e-09 * tc[3] +2.63545304e-12 * tc[4] -1.43089567e+04 * invT; /*species 12: C2H2 */ species[12] = -1.91318906e-01 +1.16807815e-02 * tc[1] -1.18390605e-05 * tc[2] +7.00381092e-09 * tc[3] -1.70014595e-12 * tc[4] +2.64289807e+04 * invT; /*species 13: C2H4 */ species[13] = +2.95920148e+00 -3.78526124e-03 * tc[1] +1.90330097e-05 * tc[2] -1.72897188e-08 * tc[3] +5.39768746e-12 * tc[4] +5.08977593e+03 * invT; /*species 14: C2H6 */ species[14] = +3.29142492e+00 -2.75077135e-03 * tc[1] +1.99812763e-05 * tc[2] -1.77116571e-08 * tc[3] +5.37371542e-12 * tc[4] -1.15222055e+04 * invT; /*species 15: NH3 */ species[15] = +3.28602740e+00 -2.33026150e-03 * tc[1] +7.23950433e-06 * tc[2] -5.70222175e-09 * tc[3] +1.65276092e-12 * tc[4] -6.74172850e+03 * invT; /*species 16: NO */ species[16] = +3.21847630e+00 -2.31948800e-03 * tc[1] +3.68034067e-06 * tc[2] -2.33403385e-09 * tc[3] +5.60715400e-13 * tc[4] +9.84462300e+03 * invT; /*species 17: HCN */ species[17] = +1.25898860e+00 +5.02558500e-03 * tc[1] -4.45058767e-06 * tc[2] +2.52308725e-09 * tc[3] -6.01780560e-13 * tc[4] +1.47126330e+04 * invT; /*species 18: N2 */ species[18] = +2.29867700e+00 +7.04120200e-04 * tc[1] -1.32107400e-06 * tc[2] +1.41037875e-09 * tc[3] -4.88970800e-13 * tc[4] -1.02089990e+03 * invT; } else { /*species 0: H2 */ species[0] = +2.33727920e+00 -2.47012365e-05 * tc[1] +1.66485593e-07 * tc[2] -4.48915985e-11 * tc[3] +4.00510752e-15 * tc[4] -9.50158922e+02 * invT; /*species 1: H */ species[1] = +1.50000001e+00 -1.15421486e-11 * tc[1] +5.38539827e-15 * tc[2] -1.18378809e-18 * tc[3] +9.96394714e-23 * tc[4] +2.54736599e+04 * invT; /*species 2: O2 */ species[2] = 
+2.28253784e+00 +7.41543770e-04 * tc[1] -2.52655556e-07 * tc[2] +5.23676387e-11 * tc[3] -4.33435588e-15 * tc[4] -1.08845772e+03 * invT; /*species 3: OH */ species[3] = +2.09288767e+00 +2.74214858e-04 * tc[1] +4.21684093e-08 * tc[2] -2.19865389e-11 * tc[3] +2.34824752e-15 * tc[4] +3.85865700e+03 * invT; /*species 4: H2O */ species[4] = +2.03399249e+00 +1.08845902e-03 * tc[1] -5.46908393e-08 * tc[2] -2.42604967e-11 * tc[3] +3.36401984e-15 * tc[4] -3.00042971e+04 * invT; /*species 5: HO2 */ species[5] = +3.01721090e+00 +1.11991006e-03 * tc[1] -2.11219383e-07 * tc[2] +2.85615925e-11 * tc[3] -2.15817070e-15 * tc[4] +1.11856713e+02 * invT; /*species 6: H2O2 */ species[6] = +3.16500285e+00 +2.45415847e-03 * tc[1] -6.33797417e-07 * tc[2] +9.27964965e-11 * tc[3] -5.75816610e-15 * tc[4] -1.78617877e+04 * invT; /*species 7: CH3 */ species[7] = +1.28571772e+00 +3.61995018e-03 * tc[1] -9.95714493e-07 * tc[2] +1.48921161e-10 * tc[3] -9.34308788e-15 * tc[4] +1.67755843e+04 * invT; /*species 8: CH4 */ species[8] = -9.25148505e-01 +6.69547335e-03 * tc[1] -1.91095270e-06 * tc[2] +3.05731338e-10 * tc[3] -2.03630460e-14 * tc[4] -9.46834459e+03 * invT; /*species 9: CO */ species[9] = +1.71518561e+00 +1.03126372e-03 * tc[1] -3.32941924e-07 * tc[2] +5.75132520e-11 * tc[3] -4.07295432e-15 * tc[4] -1.41518724e+04 * invT; /*species 10: CO2 */ species[10] = +2.85746029e+00 +2.20718513e-03 * tc[1] -7.38271347e-07 * tc[2] +1.30872547e-10 * tc[3] -9.44168328e-15 * tc[4] -4.87591660e+04 * invT; /*species 11: CH2O */ species[11] = +7.60690080e-01 +4.60000041e-03 * tc[1] -1.47419604e-06 * tc[2] +2.51603030e-10 * tc[3] -1.76771128e-14 * tc[4] -1.39958323e+04 * invT; /*species 12: C2H2 */ species[12] = +3.14756964e+00 +2.98083332e-03 * tc[1] -7.90982840e-07 * tc[2] +1.16853043e-10 * tc[3] -7.22470426e-15 * tc[4] +2.59359992e+04 * invT; /*species 13: C2H4 */ species[13] = +1.03611116e+00 +7.32270755e-03 * tc[1] -2.23692638e-06 * tc[2] +3.68057308e-10 * tc[3] -2.51412122e-14 * tc[4] +4.93988614e+03 * 
invT; /*species 14: C2H6 */ species[14] = +7.18815000e-02 +1.08426339e-02 * tc[1] -3.34186890e-06 * tc[2] +5.53530003e-10 * tc[3] -3.80005780e-14 * tc[4] -1.14263932e+04 * invT; /*species 15: NH3 */ species[15] = +1.63445210e+00 +2.83312800e-03 * tc[1] -5.75955867e-07 * tc[2] +5.96679025e-11 * tc[3] -2.51575720e-15 * tc[4] -6.54469580e+03 * invT; /*species 16: NO */ species[16] = +2.26060560e+00 +5.95552150e-04 * tc[1] -1.43056827e-07 * tc[2] +1.73644173e-11 * tc[3] -8.06721980e-16 * tc[4] +9.92097460e+03 * invT; /*species 17: HCN */ species[17] = +2.80223920e+00 +1.57321140e-03 * tc[1] -3.54406167e-07 * tc[2] +4.15493925e-11 * tc[3] -1.95995140e-15 * tc[4] +1.44072920e+04 * invT; /*species 18: N2 */ species[18] = +1.92664000e+00 +7.43988400e-04 * tc[1] -1.89492000e-07 * tc[2] +2.52425950e-11 * tc[3] -1.35067020e-15 * tc[4] -9.22797700e+02 * invT; } return; }
/* NOTE(review): speciesEnthalpy fills species[0..18] with h/(RT) (Eq 20 per the generator's
   comment). The trailing a6*invT term carries the enthalpy of formation; invT = 1/T.
   Generated coefficient table -- do not hand-edit. */
/*compute the h/(RT) at the given temperature (Eq 20) */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void speciesEnthalpy(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; double invT = 1 / T; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = +2.34433112e+00 +3.99026037e-03 * tc[1] -6.49271700e-06 * tc[2] +5.03930235e-09 * tc[3] -1.47522352e-12 * tc[4] -9.17935173e+02 * invT; /*species 1: H */ species[1] = +2.50000000e+00 +3.52666409e-13 * tc[1] -6.65306547e-16 * tc[2] +5.75204080e-19 * tc[3] -1.85546466e-22 * tc[4] +2.54736599e+04 * invT; /*species 2: O2 */ species[2] = +3.78245636e+00 -1.49836708e-03 * tc[1] +3.28243400e-06 * tc[2] -2.42032377e-09 * tc[3] +6.48745674e-13 * tc[4] -1.06394356e+03 * invT; /*species 3: OH */ species[3] = +3.99201543e+00 -1.20065876e-03 * tc[1] +1.53931280e-06 * tc[2] -9.70283332e-10 * tc[3] +2.72822940e-13 * tc[4] +3.61508056e+03 * invT; /*species 4: H2O */ species[4] = +4.19864056e+00 -1.01821705e-03 * tc[1] +2.17346737e-06 * tc[2] -1.37199266e-09 * tc[3] +3.54395634e-13 * tc[4] 
-3.02937267e+04 * invT; /*species 5: HO2 */ species[5] = +4.30179801e+00 -2.37456025e-03 * tc[1] +7.05276303e-06 * tc[2] -6.06909735e-09 * tc[3] +1.85845025e-12 * tc[4] +2.94808040e+02 * invT; /*species 6: H2O2 */ species[6] = +4.27611269e+00 -2.71411208e-04 * tc[1] +5.57785670e-06 * tc[2] -5.39427032e-09 * tc[3] +1.72490873e-12 * tc[4] -1.77025821e+04 * invT; /*species 7: CH3 */ species[7] = +3.67359040e+00 +1.00547588e-03 * tc[1] +1.91007285e-06 * tc[2] -1.71779356e-09 * tc[3] +5.08771468e-13 * tc[4] +1.64449988e+04 * invT; /*species 8: CH4 */ species[8] = +5.14987613e+00 -6.83548940e-03 * tc[1] +1.63933533e-05 * tc[2] -1.21185757e-08 * tc[3] +3.33387912e-12 * tc[4] -1.02466476e+04 * invT; /*species 9: CO */ species[9] = +3.57953347e+00 -3.05176840e-04 * tc[1] +3.38938110e-07 * tc[2] +2.26751471e-10 * tc[3] -1.80884900e-13 * tc[4] -1.43440860e+04 * invT; /*species 10: CO2 */ species[10] = +2.35677352e+00 +4.49229839e-03 * tc[1] -2.37452090e-06 * tc[2] +6.14797555e-10 * tc[3] -2.87399096e-14 * tc[4] -4.83719697e+04 * invT; /*species 11: CH2O */ species[11] = +4.79372315e+00 -4.95416684e-03 * tc[1] +1.24406669e-05 * tc[2] -9.48213152e-09 * tc[3] +2.63545304e-12 * tc[4] -1.43089567e+04 * invT; /*species 12: C2H2 */ species[12] = +8.08681094e-01 +1.16807815e-02 * tc[1] -1.18390605e-05 * tc[2] +7.00381092e-09 * tc[3] -1.70014595e-12 * tc[4] +2.64289807e+04 * invT; /*species 13: C2H4 */ species[13] = +3.95920148e+00 -3.78526124e-03 * tc[1] +1.90330097e-05 * tc[2] -1.72897188e-08 * tc[3] +5.39768746e-12 * tc[4] +5.08977593e+03 * invT; /*species 14: C2H6 */ species[14] = +4.29142492e+00 -2.75077135e-03 * tc[1] +1.99812763e-05 * tc[2] -1.77116571e-08 * tc[3] +5.37371542e-12 * tc[4] -1.15222055e+04 * invT; /*species 15: NH3 */ species[15] = +4.28602740e+00 -2.33026150e-03 * tc[1] +7.23950433e-06 * tc[2] -5.70222175e-09 * tc[3] +1.65276092e-12 * tc[4] -6.74172850e+03 * invT; /*species 16: NO */ species[16] = +4.21847630e+00 -2.31948800e-03 * tc[1] +3.68034067e-06 * tc[2] 
-2.33403385e-09 * tc[3] +5.60715400e-13 * tc[4] +9.84462300e+03 * invT; /*species 17: HCN */ species[17] = +2.25898860e+00 +5.02558500e-03 * tc[1] -4.45058767e-06 * tc[2] +2.52308725e-09 * tc[3] -6.01780560e-13 * tc[4] +1.47126330e+04 * invT; /*species 18: N2 */ species[18] = +3.29867700e+00 +7.04120200e-04 * tc[1] -1.32107400e-06 * tc[2] +1.41037875e-09 * tc[3] -4.88970800e-13 * tc[4] -1.02089990e+03 * invT; } else { /*species 0: H2 */ species[0] = +3.33727920e+00 -2.47012365e-05 * tc[1] +1.66485593e-07 * tc[2] -4.48915985e-11 * tc[3] +4.00510752e-15 * tc[4] -9.50158922e+02 * invT; /*species 1: H */ species[1] = +2.50000001e+00 -1.15421486e-11 * tc[1] +5.38539827e-15 * tc[2] -1.18378809e-18 * tc[3] +9.96394714e-23 * tc[4] +2.54736599e+04 * invT; /*species 2: O2 */ species[2] = +3.28253784e+00 +7.41543770e-04 * tc[1] -2.52655556e-07 * tc[2] +5.23676387e-11 * tc[3] -4.33435588e-15 * tc[4] -1.08845772e+03 * invT; /*species 3: OH */ species[3] = +3.09288767e+00 +2.74214858e-04 * tc[1] +4.21684093e-08 * tc[2] -2.19865389e-11 * tc[3] +2.34824752e-15 * tc[4] +3.85865700e+03 * invT; /*species 4: H2O */ species[4] = +3.03399249e+00 +1.08845902e-03 * tc[1] -5.46908393e-08 * tc[2] -2.42604967e-11 * tc[3] +3.36401984e-15 * tc[4] -3.00042971e+04 * invT; /*species 5: HO2 */ species[5] = +4.01721090e+00 +1.11991006e-03 * tc[1] -2.11219383e-07 * tc[2] +2.85615925e-11 * tc[3] -2.15817070e-15 * tc[4] +1.11856713e+02 * invT; /*species 6: H2O2 */ species[6] = +4.16500285e+00 +2.45415847e-03 * tc[1] -6.33797417e-07 * tc[2] +9.27964965e-11 * tc[3] -5.75816610e-15 * tc[4] -1.78617877e+04 * invT; /*species 7: CH3 */ species[7] = +2.28571772e+00 +3.61995018e-03 * tc[1] -9.95714493e-07 * tc[2] +1.48921161e-10 * tc[3] -9.34308788e-15 * tc[4] +1.67755843e+04 * invT; /*species 8: CH4 */ species[8] = +7.48514950e-02 +6.69547335e-03 * tc[1] -1.91095270e-06 * tc[2] +3.05731338e-10 * tc[3] -2.03630460e-14 * tc[4] -9.46834459e+03 * invT; /*species 9: CO */ species[9] = +2.71518561e+00 
+1.03126372e-03 * tc[1] -3.32941924e-07 * tc[2] +5.75132520e-11 * tc[3] -4.07295432e-15 * tc[4] -1.41518724e+04 * invT; /*species 10: CO2 */ species[10] = +3.85746029e+00 +2.20718513e-03 * tc[1] -7.38271347e-07 * tc[2] +1.30872547e-10 * tc[3] -9.44168328e-15 * tc[4] -4.87591660e+04 * invT; /*species 11: CH2O */ species[11] = +1.76069008e+00 +4.60000041e-03 * tc[1] -1.47419604e-06 * tc[2] +2.51603030e-10 * tc[3] -1.76771128e-14 * tc[4] -1.39958323e+04 * invT; /*species 12: C2H2 */ species[12] = +4.14756964e+00 +2.98083332e-03 * tc[1] -7.90982840e-07 * tc[2] +1.16853043e-10 * tc[3] -7.22470426e-15 * tc[4] +2.59359992e+04 * invT; /*species 13: C2H4 */ species[13] = +2.03611116e+00 +7.32270755e-03 * tc[1] -2.23692638e-06 * tc[2] +3.68057308e-10 * tc[3] -2.51412122e-14 * tc[4] +4.93988614e+03 * invT; /*species 14: C2H6 */ species[14] = +1.07188150e+00 +1.08426339e-02 * tc[1] -3.34186890e-06 * tc[2] +5.53530003e-10 * tc[3] -3.80005780e-14 * tc[4] -1.14263932e+04 * invT; /*species 15: NH3 */ species[15] = +2.63445210e+00 +2.83312800e-03 * tc[1] -5.75955867e-07 * tc[2] +5.96679025e-11 * tc[3] -2.51575720e-15 * tc[4] -6.54469580e+03 * invT; /*species 16: NO */ species[16] = +3.26060560e+00 +5.95552150e-04 * tc[1] -1.43056827e-07 * tc[2] +1.73644173e-11 * tc[3] -8.06721980e-16 * tc[4] +9.92097460e+03 * invT; /*species 17: HCN */ species[17] = +3.80223920e+00 +1.57321140e-03 * tc[1] -3.54406167e-07 * tc[2] +4.15493925e-11 * tc[3] -1.95995140e-15 * tc[4] +1.44072920e+04 * invT; /*species 18: N2 */ species[18] = +2.92664000e+00 +7.43988400e-04 * tc[1] -1.89492000e-07 * tc[2] +2.52425950e-11 * tc[3] -1.35067020e-15 * tc[4] -9.22797700e+02 * invT; } return; }
/* NOTE(review): speciesEntropy fills species[0..18] with S/R (Eq 21 per the generator's
   comment). Leading term is a1*tc[0] where tc[0] = log(T); trailing bare constant is the a7
   entropy constant. Generated coefficient table -- do not hand-edit. */
/*compute the S/R at the given temperature (Eq 21) */ /*tc contains precomputed powers of T, tc[0] = log(T) */ void speciesEntropy(double * restrict species, double * restrict tc) { /*temperature */ double T = tc[1]; /*species with midpoint at T=1000 kelvin */ if (T < 1000) { /*species 0: H2 */ species[0] = +2.34433112e+00 * 
tc[0] +7.98052075e-03 * tc[1] -9.73907550e-06 * tc[2] +6.71906980e-09 * tc[3] -1.84402940e-12 * tc[4] +6.83010238e-01 ; /*species 1: H */ species[1] = +2.50000000e+00 * tc[0] +7.05332819e-13 * tc[1] -9.97959820e-16 * tc[2] +7.66938773e-19 * tc[3] -2.31933083e-22 * tc[4] -4.46682853e-01 ; /*species 2: O2 */ species[2] = +3.78245636e+00 * tc[0] -2.99673416e-03 * tc[1] +4.92365101e-06 * tc[2] -3.22709836e-09 * tc[3] +8.10932092e-13 * tc[4] +3.65767573e+00 ; /*species 3: OH */ species[3] = +3.99201543e+00 * tc[0] -2.40131752e-03 * tc[1] +2.30896920e-06 * tc[2] -1.29371111e-09 * tc[3] +3.41028675e-13 * tc[4] -1.03925458e-01 ; /*species 4: H2O */ species[4] = +4.19864056e+00 * tc[0] -2.03643410e-03 * tc[1] +3.26020105e-06 * tc[2] -1.82932354e-09 * tc[3] +4.42994543e-13 * tc[4] -8.49032208e-01 ; /*species 5: HO2 */ species[5] = +4.30179801e+00 * tc[0] -4.74912051e-03 * tc[1] +1.05791445e-05 * tc[2] -8.09212980e-09 * tc[3] +2.32306281e-12 * tc[4] +3.71666245e+00 ; /*species 6: H2O2 */ species[6] = +4.27611269e+00 * tc[0] -5.42822417e-04 * tc[1] +8.36678505e-06 * tc[2] -7.19236043e-09 * tc[3] +2.15613591e-12 * tc[4] +3.43505074e+00 ; /*species 7: CH3 */ species[7] = +3.67359040e+00 * tc[0] +2.01095175e-03 * tc[1] +2.86510928e-06 * tc[2] -2.29039142e-09 * tc[3] +6.35964335e-13 * tc[4] +1.60456433e+00 ; /*species 8: CH4 */ species[8] = +5.14987613e+00 * tc[0] -1.36709788e-02 * tc[1] +2.45900299e-05 * tc[2] -1.61581009e-08 * tc[3] +4.16734890e-12 * tc[4] -4.64130376e+00 ; /*species 9: CO */ species[9] = +3.57953347e+00 * tc[0] -6.10353680e-04 * tc[1] +5.08407165e-07 * tc[2] +3.02335295e-10 * tc[3] -2.26106125e-13 * tc[4] +3.50840928e+00 ; /*species 10: CO2 */ species[10] = +2.35677352e+00 * tc[0] +8.98459677e-03 * tc[1] -3.56178134e-06 * tc[2] +8.19730073e-10 * tc[3] -3.59248870e-14 * tc[4] +9.90105222e+00 ; /*species 11: CH2O */ species[11] = +4.79372315e+00 * tc[0] -9.90833369e-03 * tc[1] +1.86610004e-05 * tc[2] -1.26428420e-08 * tc[3] +3.29431630e-12 * tc[4] +6.02812900e-01 
; /*species 12: C2H2 */ species[12] = +8.08681094e-01 * tc[0] +2.33615629e-02 * tc[1] -1.77585907e-05 * tc[2] +9.33841457e-09 * tc[3] -2.12518243e-12 * tc[4] +1.39397051e+01 ; /*species 13: C2H4 */ species[13] = +3.95920148e+00 * tc[0] -7.57052247e-03 * tc[1] +2.85495146e-05 * tc[2] -2.30529584e-08 * tc[3] +6.74710933e-12 * tc[4] +4.09733096e+00 ; /*species 14: C2H6 */ species[14] = +4.29142492e+00 * tc[0] -5.50154270e-03 * tc[1] +2.99719144e-05 * tc[2] -2.36155428e-08 * tc[3] +6.71714427e-12 * tc[4] +2.66682316e+00 ; /*species 15: NH3 */ species[15] = +4.28602740e+00 * tc[0] -4.66052300e-03 * tc[1] +1.08592565e-05 * tc[2] -7.60296233e-09 * tc[3] +2.06595115e-12 * tc[4] -6.25372770e-01 ; /*species 16: NO */ species[16] = +4.21847630e+00 * tc[0] -4.63897600e-03 * tc[1] +5.52051100e-06 * tc[2] -3.11204513e-09 * tc[3] +7.00894250e-13 * tc[4] +2.28084640e+00 ; /*species 17: HCN */ species[17] = +2.25898860e+00 * tc[0] +1.00511700e-02 * tc[1] -6.67588150e-06 * tc[2] +3.36411633e-09 * tc[3] -7.52225700e-13 * tc[4] +8.91644190e+00 ; /*species 18: N2 */ species[18] = +3.29867700e+00 * tc[0] +1.40824040e-03 * tc[1] -1.98161100e-06 * tc[2] +1.88050500e-09 * tc[3] -6.11213500e-13 * tc[4] +3.95037200e+00 ; } else { /*species 0: H2 */ species[0] = +3.33727920e+00 * tc[0] -4.94024731e-05 * tc[1] +2.49728389e-07 * tc[2] -5.98554647e-11 * tc[3] +5.00638440e-15 * tc[4] -3.20502331e+00 ; /*species 1: H */ species[1] = +2.50000001e+00 * tc[0] -2.30842973e-11 * tc[1] +8.07809740e-15 * tc[2] -1.57838412e-18 * tc[3] +1.24549339e-22 * tc[4] -4.46682914e-01 ; /*species 2: O2 */ species[2] = +3.28253784e+00 * tc[0] +1.48308754e-03 * tc[1] -3.78983334e-07 * tc[2] +6.98235183e-11 * tc[3] -5.41794485e-15 * tc[4] +5.45323129e+00 ; /*species 3: OH */ species[3] = +3.09288767e+00 * tc[0] +5.48429716e-04 * tc[1] +6.32526140e-08 * tc[2] -2.93153852e-11 * tc[3] +2.93530940e-15 * tc[4] +4.47669610e+00 ; /*species 4: H2O */ species[4] = +3.03399249e+00 * tc[0] +2.17691804e-03 * tc[1] -8.20362590e-08 
* tc[2] -3.23473290e-11 * tc[3] +4.20502480e-15 * tc[4] +4.96677010e+00 ; /*species 5: HO2 */ species[5] = +4.01721090e+00 * tc[0] +2.23982013e-03 * tc[1] -3.16829075e-07 * tc[2] +3.80821233e-11 * tc[3] -2.69771337e-15 * tc[4] +3.78510215e+00 ; /*species 6: H2O2 */ species[6] = +4.16500285e+00 * tc[0] +4.90831694e-03 * tc[1] -9.50696125e-07 * tc[2] +1.23728662e-10 * tc[3] -7.19770763e-15 * tc[4] +2.91615662e+00 ; /*species 7: CH3 */ species[7] = +2.28571772e+00 * tc[0] +7.23990037e-03 * tc[1] -1.49357174e-06 * tc[2] +1.98561548e-10 * tc[3] -1.16788599e-14 * tc[4] +8.48007179e+00 ; /*species 8: CH4 */ species[8] = +7.48514950e-02 * tc[0] +1.33909467e-02 * tc[1] -2.86642905e-06 * tc[2] +4.07641783e-10 * tc[3] -2.54538075e-14 * tc[4] +1.84373180e+01 ; /*species 9: CO */ species[9] = +2.71518561e+00 * tc[0] +2.06252743e-03 * tc[1] -4.99412886e-07 * tc[2] +7.66843360e-11 * tc[3] -5.09119290e-15 * tc[4] +7.81868772e+00 ; /*species 10: CO2 */ species[10] = +3.85746029e+00 * tc[0] +4.41437026e-03 * tc[1] -1.10740702e-06 * tc[2] +1.74496729e-10 * tc[3] -1.18021041e-14 * tc[4] +2.27163806e+00 ; /*species 11: CH2O */ species[11] = +1.76069008e+00 * tc[0] +9.20000082e-03 * tc[1] -2.21129406e-06 * tc[2] +3.35470707e-10 * tc[3] -2.20963910e-14 * tc[4] +1.36563230e+01 ; /*species 12: C2H2 */ species[12] = +4.14756964e+00 * tc[0] +5.96166664e-03 * tc[1] -1.18647426e-06 * tc[2] +1.55804057e-10 * tc[3] -9.03088033e-15 * tc[4] -1.23028121e+00 ; /*species 13: C2H4 */ species[13] = +2.03611116e+00 * tc[0] +1.46454151e-02 * tc[1] -3.35538958e-06 * tc[2] +4.90743077e-10 * tc[3] -3.14265152e-14 * tc[4] +1.03053693e+01 ; /*species 14: C2H6 */ species[14] = +1.07188150e+00 * tc[0] +2.16852677e-02 * tc[1] -5.01280335e-06 * tc[2] +7.38040003e-10 * tc[3] -4.75007225e-14 * tc[4] +1.51156107e+01 ; /*species 15: NH3 */ species[15] = +2.63445210e+00 * tc[0] +5.66625600e-03 * tc[1] -8.63933800e-07 * tc[2] +7.95572033e-11 * tc[3] -3.14469650e-15 * tc[4] +6.56629280e+00 ; /*species 16: NO */ 
species[16] =
    +3.26060560e+00 * tc[0]
    +1.19110430e-03 * tc[1]
    -2.14585240e-07 * tc[2]
    +2.31525563e-11 * tc[3]
    -1.00840247e-15 * tc[4]
    +6.36930270e+00 ;
/*species 17: HCN */
species[17] =
    +3.80223920e+00 * tc[0]
    +3.14642280e-03 * tc[1]
    -5.31609250e-07 * tc[2]
    +5.53991900e-11 * tc[3]
    -2.44993925e-15 * tc[4]
    +1.57546010e+00 ;
/*species 18: N2 */
species[18] =
    +2.92664000e+00 * tc[0]
    +1.48797680e-03 * tc[1]
    -2.84238000e-07 * tc[2]
    +3.36567933e-11 * tc[3]
    -1.68833775e-15 * tc[4]
    +5.98052800e+00 ;
}
return;
}


/*save molecular weights into array */
/* Fills wt[0..18] with one molecular weight per species, in the mechanism's
 * species order (H2, H, O2, OH, H2O, HO2, H2O2, CH3, CH4, CO, CO2, CH2O,
 * C2H2, C2H4, C2H6, NH3, NO, HCN, N2). Values match standard molar masses,
 * so units are presumably g/mol -- TODO confirm against the generator. */
void molecularWeight(double * restrict wt)
{
    wt[0] = 2.015940; /*H2 */
    wt[1] = 1.007970; /*H */
    wt[2] = 31.998800; /*O2 */
    wt[3] = 17.007370; /*OH */
    wt[4] = 18.015340; /*H2O */
    wt[5] = 33.006770; /*HO2 */
    wt[6] = 34.014740; /*H2O2 */
    wt[7] = 15.035060; /*CH3 */
    wt[8] = 16.043030; /*CH4 */
    wt[9] = 28.010550; /*CO */
    wt[10] = 44.009950; /*CO2 */
    wt[11] = 30.026490; /*CH2O */
    wt[12] = 26.038240; /*C2H2 */
    wt[13] = 28.054180; /*C2H4 */
    wt[14] = 30.070120; /*C2H6 */
    wt[15] = 17.030610; /*NH3 */
    wt[16] = 30.006100; /*NO */
    wt[17] = 27.025820; /*HCN */
    wt[18] = 28.013400; /*N2 */

    return;
}


/*save atomic weights into array */
/* Fills awt[0..3] with the atomic weights of the four elements used by this
 * mechanism, in element order O, H, C, N. */
void atomicWeight(double * restrict awt)
{
    awt[0] = 15.999400; /*O */
    awt[1] = 1.007970; /*H */
    awt[2] = 12.011150; /*C */
    awt[3] = 14.006700; /*N */

    return;
}


/* get temperature given internal energy in mass units and mass fracs */
/* Inverts e(T) for temperature by Newton iteration.
 *   e    in : target mixture internal energy (mass units)
 *   y    in : species mass fractions
 *   iwrk/rwrk : CHEMKIN-style work arrays, passed through to CK* routines
 *   t    in/out : on entry, initial guess (used if within [tmin,tmax]);
 *                 on exit, the solved temperature
 *   ierr out : 0 on success; 1 when e lies outside the thermo fit range
 *              [tmin,tmax] and a linear extrapolation was returned instead.
 * CKUBMS/CKCVBS are external (presumably mixture internal energy and Cv in
 * bulk/mass units, per CHEMKIN naming -- confirm against the CK library).
 * The Newton step dt = (ein - e1)/cv is clamped to +/-100 K per iteration;
 * iteration stops when |dt| < tol or when dt underflows (t1+dt == t1). */
void GET_T_GIVEN_EY(double * restrict e, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int * ierr)
{
#ifdef CONVERGENCE
    const int maxiter = 5000;
    const double tol = 1.e-12;
#else
    const int maxiter = 200;
    const double tol = 1.e-6;
#endif
    double ein = *e;
    double tmin = 90;/*max lower bound for thermo def */
    double tmax = 4000;/*min upper bound for thermo def */
    double e1,emin,emax,cv,t1,dt;
    int i;/* loop counter */
    CKUBMS(&tmin, y, iwrk, rwrk, &emin);
    CKUBMS(&tmax, y, iwrk, rwrk, &emax);
    if (ein < emin) {
        /*Linear Extrapolation below tmin */
        CKCVBS(&tmin, y, iwrk, rwrk, &cv);
        *t = tmin - (emin-ein)/cv;
        *ierr = 1;
        return;
    }
    if (ein > emax) {
        /*Linear Extrapolation above tmax */
        CKCVBS(&tmax, y, iwrk, rwrk, &cv);
        *t = tmax - (emax-ein)/cv;
        *ierr = 1;
        return;
    }
    t1 = *t;
    if (t1 < tmin || t1 > tmax) {
        /* Caller's guess is unusable: seed with a linear interpolation of
         * e between the range endpoints. */
        t1 = tmin + (tmax-tmin)/(emax-emin)*(ein-emin);
    }
    for (i = 0; i < maxiter; ++i) {
        CKUBMS(&t1,y,iwrk,rwrk,&e1);
        CKCVBS(&t1,y,iwrk,rwrk,&cv);
        dt = (ein - e1) / cv;  /* Newton step: de/dT = cv */
        if (dt > 100.) { dt = 100.; }
        else if (dt < -100.) { dt = -100.; }
        else if (fabs(dt) < tol) break;
        else if (t1+dt == t1) break;  /* step below machine precision */
        t1 += dt;
    }
    *t = t1;
    *ierr = 0;
    return;
}

/* get temperature given enthalpy in mass units and mass fracs */
/* Same Newton scheme as GET_T_GIVEN_EY, but inverts h(T) using enthalpy and
 * Cp (external CKHBMS/CKCPBS) instead of internal energy and Cv.
 *   h    in : target mixture enthalpy (mass units)
 *   y    in : species mass fractions
 *   t    in/out : initial guess in, solved temperature out
 *   ierr out : 0 on success; 1 when h is outside [h(tmin), h(tmax)] and a
 *              linear extrapolation was returned. */
void GET_T_GIVEN_HY(double * restrict h, double * restrict y, int * iwrk, double * restrict rwrk, double * restrict t, int * ierr)
{
#ifdef CONVERGENCE
    const int maxiter = 5000;
    const double tol = 1.e-12;
#else
    const int maxiter = 200;
    const double tol = 1.e-6;
#endif
    double hin = *h;
    double tmin = 90;/*max lower bound for thermo def */
    double tmax = 4000;/*min upper bound for thermo def */
    double h1,hmin,hmax,cp,t1,dt;
    int i;/* loop counter */
    CKHBMS(&tmin, y, iwrk, rwrk, &hmin);
    CKHBMS(&tmax, y, iwrk, rwrk, &hmax);
    if (hin < hmin) {
        /*Linear Extrapolation below tmin */
        CKCPBS(&tmin, y, iwrk, rwrk, &cp);
        *t = tmin - (hmin-hin)/cp;
        *ierr = 1;
        return;
    }
    if (hin > hmax) {
        /*Linear Extrapolation above tmax */
        CKCPBS(&tmax, y, iwrk, rwrk, &cp);
        *t = tmax - (hmax-hin)/cp;
        *ierr = 1;
        return;
    }
    t1 = *t;
    if (t1 < tmin || t1 > tmax) {
        /* Seed with a linear interpolation of h between the endpoints. */
        t1 = tmin + (tmax-tmin)/(hmax-hmin)*(hin-hmin);
    }
    for (i = 0; i < maxiter; ++i) {
        CKHBMS(&t1,y,iwrk,rwrk,&h1);
        CKCPBS(&t1,y,iwrk,rwrk,&cp);
        dt = (hin - h1) / cp;  /* Newton step: dh/dT = cp */
        if (dt > 100.) { dt = 100.; }
        else if (dt < -100.) { dt = -100.; }
        else if (fabs(dt) < tol) break;
        else if (t1+dt == t1) break;  /* step below machine precision */
        t1 += dt;
    }
    *t = t1;
    *ierr = 0;
    return;
}

/*compute the critical parameters for each species */
/* For each of the 19 species, fills:
 *   Tci        : critical temperature
 *   ai, bi     : equation-of-state parameters built from Tci (the 0.42748 /
 *                0.08664 coefficients suggest a Soave/Redlich-Kwong form --
 *                confirm against the consuming EOS code)
 *   acentric_i : acentric factor
 * Two paths are used per species: values imported from NIST (hard-coded Tc,
 * Pc, omega), or estimates from the Lennard-Jones well depth EPS [K] and
 * diameter SIG [Angstrom, converted via 1e-8 to cm] supplied by
 * egtransetEPS/egtransetSIG, with molecular weights from molecularWeight.
 * All work is done in CGS units (boltzmann in erg/K, Rcst in bar-based CGS
 * per the comments below). */
void GET_CRITPARAMS(double * restrict Tci, double * restrict ai, double * restrict bi, double * restrict acentric_i)
{
    double EPS[19];
    double SIG[19];
    double wt[19];
    double avogadro = 6.02214199e23;
    double boltzmann = 1.3806503e-16; //we work in CGS
    double Rcst = 83.144598; //in bar [CGS] !

    egtransetEPS(EPS);
    egtransetSIG(SIG);
    molecularWeight(wt);

    /*species 0: H2 */
    /*Imported from NIST */
    Tci[0] = 33.145000 ;
    ai[0] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[0],2.0) / (pow(2.015880,2.0) * 12.964000);
    bi[0] = 0.08664 * Rcst * Tci[0] / (2.015880 * 12.964000);
    acentric_i[0] = -0.219000 ;

    /*species 1: H */
    Tci[1] = 1.316 * EPS[1] ;
    ai[1] = (5.55 * pow(avogadro,2.0) * EPS[1]*boltzmann * pow(1e-8*SIG[1],3.0) ) / (pow(wt[1],2.0));
    bi[1] = 0.855 * avogadro * pow(1e-8*SIG[1],3.0) / (wt[1]);
    acentric_i[1] = 0.0 ;

    /*species 2: O2 */
    /*Imported from NIST */
    Tci[2] = 154.581000 ;
    ai[2] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[2],2.0) / (pow(31.998800,2.0) * 50.430466);
    bi[2] = 0.08664 * Rcst * Tci[2] / (31.998800 * 50.430466);
    acentric_i[2] = 0.022200 ;

    /*species 3: OH */
    Tci[3] = 1.316 * EPS[3] ;
    ai[3] = (5.55 * pow(avogadro,2.0) * EPS[3]*boltzmann * pow(1e-8*SIG[3],3.0) ) / (pow(wt[3],2.0));
    bi[3] = 0.855 * avogadro * pow(1e-8*SIG[3],3.0) / (wt[3]);
    acentric_i[3] = 0.0 ;

    /*species 4: H2O */
    /*Imported from NIST */
    Tci[4] = 647.096000 ;
    ai[4] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[4],2.0) / (pow(18.015340,2.0) * 220.640000);
    bi[4] = 0.08664 * Rcst * Tci[4] / (18.015340 * 220.640000);
    acentric_i[4] = 0.344300 ;

    /*species 5: HO2 */
    Tci[5] = 1.316 * EPS[5] ;
    ai[5] = (5.55 * pow(avogadro,2.0) * EPS[5]*boltzmann * pow(1e-8*SIG[5],3.0) ) / (pow(wt[5],2.0));
    bi[5] = 0.855 * avogadro * pow(1e-8*SIG[5],3.0) / (wt[5]);
    acentric_i[5] = 0.0 ;

    /*species 6: H2O2 */
    Tci[6] = 1.316 * EPS[6] ;
    ai[6] = (5.55 *
pow(avogadro,2.0) * EPS[6]*boltzmann * pow(1e-8*SIG[6],3.0) ) / (pow(wt[6],2.0));
    bi[6] = 0.855 * avogadro * pow(1e-8*SIG[6],3.0) / (wt[6]);
    acentric_i[6] = 0.0 ;

    /*species 7: CH3 */
    Tci[7] = 1.316 * EPS[7] ;
    ai[7] = (5.55 * pow(avogadro,2.0) * EPS[7]*boltzmann * pow(1e-8*SIG[7],3.0) ) / (pow(wt[7],2.0));
    bi[7] = 0.855 * avogadro * pow(1e-8*SIG[7],3.0) / (wt[7]);
    acentric_i[7] = 0.0 ;

    /*species 8: CH4 */
    /*Imported from NIST */
    Tci[8] = 190.560000 ;
    ai[8] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[8],2.0) / (pow(16.043030,2.0) * 45.990000);
    bi[8] = 0.08664 * Rcst * Tci[8] / (16.043030 * 45.990000);
    acentric_i[8] = 0.011000 ;

    /*species 9: CO */
    /*Imported from NIST */
    Tci[9] = 132.850000 ;
    ai[9] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[9],2.0) / (pow(28.010000,2.0) * 34.940000);
    bi[9] = 0.08664 * Rcst * Tci[9] / (28.010000 * 34.940000);
    acentric_i[9] = 0.045000 ;

    /*species 10: CO2 */
    /*Imported from NIST */
    Tci[10] = 304.120000 ;
    ai[10] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[10],2.0) / (pow(44.009950,2.0) * 73.740000);
    bi[10] = 0.08664 * Rcst * Tci[10] / (44.009950 * 73.740000);
    acentric_i[10] = 0.225000 ;

    /*species 11: CH2O */
    Tci[11] = 1.316 * EPS[11] ;
    ai[11] = (5.55 * pow(avogadro,2.0) * EPS[11]*boltzmann * pow(1e-8*SIG[11],3.0) ) / (pow(wt[11],2.0));
    bi[11] = 0.855 * avogadro * pow(1e-8*SIG[11],3.0) / (wt[11]);
    acentric_i[11] = 0.0 ;

    /*species 12: C2H2 */
    /*Imported from NIST */
    Tci[12] = 308.300000 ;
    ai[12] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[12],2.0) / (pow(26.038000,2.0) * 61.140000);
    bi[12] = 0.08664 * Rcst * Tci[12] / (26.038000 * 61.140000);
    acentric_i[12] = 0.189000 ;

    /*species 13: C2H4 */
    /*Imported from NIST */
    Tci[13] = 282.340000 ;
    ai[13] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[13],2.0) / (pow(28.054000,2.0) * 50.410000);
    bi[13] = 0.08664 * Rcst * Tci[13] / (28.054000 * 50.410000);
    acentric_i[13] = 0.087000 ;

    /*species 14: C2H6 */
    /*Imported from NIST */
    Tci[14] = 305.320000 ;
    ai[14] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[14],2.0) / (pow(30.070120,2.0) * 48.720000);
    bi[14] = 0.08664 * Rcst * Tci[14] / (30.070120 * 48.720000);
    acentric_i[14] = 0.099000 ;

    /*species 15: NH3 */
    Tci[15] = 1.316 * EPS[15] ;
    ai[15] = (5.55 * pow(avogadro,2.0) * EPS[15]*boltzmann * pow(1e-8*SIG[15],3.0) ) / (pow(wt[15],2.0));
    bi[15] = 0.855 * avogadro * pow(1e-8*SIG[15],3.0) / (wt[15]);
    acentric_i[15] = 0.0 ;

    /*species 16: NO */
    /*Imported from NIST */
    Tci[16] = 180.000000 ;
    ai[16] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[16],2.0) / (pow(30.006000,2.0) * 64.800000);
    bi[16] = 0.08664 * Rcst * Tci[16] / (30.006000 * 64.800000);
    acentric_i[16] = 0.582000 ;

    /*species 17: HCN */
    /* NOTE(review): this reads EPS[17]/SIG[17], but egtransetEPS/egtransetSIG
     * below only assign indices 0..16 (17 transport species vs 19 thermo
     * species) -- if no other definition fills them, these reads are
     * uninitialized. Verify against the transport data generator. */
    Tci[17] = 1.316 * EPS[17] ;
    ai[17] = (5.55 * pow(avogadro,2.0) * EPS[17]*boltzmann * pow(1e-8*SIG[17],3.0) ) / (pow(wt[17],2.0));
    bi[17] = 0.855 * avogadro * pow(1e-8*SIG[17],3.0) / (wt[17]);
    acentric_i[17] = 0.0 ;

    /*species 18: N2 */
    /*Imported from NIST */
    Tci[18] = 126.192000 ;
    ai[18] = 1e6 * 0.42748 * pow(Rcst,2.0) * pow(Tci[18],2.0) / (pow(28.013400,2.0) * 33.958000);
    bi[18] = 0.08664 * Rcst * Tci[18] / (28.013400 * 33.958000);
    acentric_i[18] = 0.037200 ;

    return;
}

/* End of file */

/* The remainder of this file is the generated EGLIB/transport interface:
 * each egtransetXXX symbol is given a Fortran-mangled alias chosen by the
 * BL_FORT_USE_* build flag, and each function writes one fixed data table.
 * NOTE(review): the transport tables below cover 17 species (indices 0..16),
 * while the thermo section above covers 19 -- confirm this mismatch is
 * intended by the mechanism generator. */
#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetLENIMC EGTRANSETLENIMC
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetLENIMC egtransetlenimc
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetLENIMC egtransetlenimc_
#endif
/* Required length of the integer transport work array. */
void egtransetLENIMC(int* LENIMC) { *LENIMC = 70;}

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetLENRMC EGTRANSETLENRMC
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetLENRMC egtransetlenrmc
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetLENRMC egtransetlenrmc_
#endif
/* Required length of the real transport work array. */
void egtransetLENRMC(int* LENRMC) { *LENRMC = 6086;}

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetNO EGTRANSETNO
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetNO egtransetno
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetNO egtransetno_
#endif
/* Number of fit coefficients per transport polynomial (see COFLAM/COFETA/
 * COFD below: 4 coefficients per entry). */
void egtransetNO(int* NO) { *NO = 4;}

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetKK EGTRANSETKK
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetKK egtransetkk
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetKK egtransetkk_
#endif
/* Number of species carried by the transport tables (17, not the 19 of the
 * thermo section -- see the note above). */
void egtransetKK(int* KK) { *KK = 17;}

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetNLITE EGTRANSETNLITE
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetNLITE egtransetnlite
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetNLITE egtransetnlite_
#endif
/* Number of "light" species (presumably H2 and H) -- confirm with EGLIB. */
void egtransetNLITE(int* NLITE) { *NLITE = 2;}

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetPATM EGTRANSETPATM
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetPATM egtransetpatm
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetPATM egtransetpatm_
#endif
/* One standard atmosphere; the magnitude (1.01325e6) is the CGS value in
 * dyne/cm^2. */
void egtransetPATM(double* PATM) { *PATM = 0.1013250000000000E+07;}

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetWT EGTRANSETWT
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetWT egtransetwt
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetWT egtransetwt_
#endif
/* Molecular weights of the 17 transport species. */
void egtransetWT(double* WT) {
    WT[ 0] = 0.2015939950942993E+01;
    WT[ 1] = 0.1007969975471497E+01;
    WT[ 2] = 0.3199880027770996E+02;
    WT[ 3] = 0.1700737011432648E+02;
    WT[ 4] = 0.1801534008979797E+02;
    WT[ 5] = 0.3300677025318146E+02;
    WT[ 6] = 0.3401474022865295E+02;
    WT[ 7] = 0.1503506028652191E+02;
    WT[ 8] = 0.1604303026199341E+02;
    WT[ 9] = 0.2801055049896240E+02;
    WT[ 10] = 0.4400995063781738E+02;
    WT[ 11] = 0.3002649044990540E+02;
    WT[ 12] = 0.2603824067115784E+02;
    WT[ 13] = 0.2805418062210083E+02;
    WT[ 14] = 0.3007012057304382E+02;
    WT[ 15] = 0.1703060948848724E+02;
    WT[ 16] = 0.1400669956207275E+02;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetEPS EGTRANSETEPS
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetEPS egtranseteps
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetEPS egtranseteps_
#endif
/* Lennard-Jones well depths (used as EPS[k] by GET_CRITPARAMS; presumably
 * eps/kB in Kelvin). Only indices 0..16 are filled. */
void egtransetEPS(double* EPS) {
    EPS[ 0] = 0.3800000000000000E+02;
    EPS[ 1] = 0.1450000000000000E+03;
    EPS[ 2] = 0.1074000000000000E+03;
    EPS[ 3] = 0.8000000000000000E+02;
    EPS[ 4] = 0.5724000000000000E+03;
    EPS[ 5] = 0.1074000000000000E+03;
    EPS[ 6] = 0.1074000000000000E+03;
    EPS[ 7] = 0.1440000000000000E+03;
    EPS[ 8] = 0.1414000000000000E+03;
    EPS[ 9] = 0.9809999999999999E+02;
    EPS[ 10] = 0.2440000000000000E+03;
    EPS[ 11] = 0.4980000000000000E+03;
    EPS[ 12] = 0.2090000000000000E+03;
    EPS[ 13] = 0.2808000000000000E+03;
    EPS[ 14] = 0.2523000000000000E+03;
    EPS[ 15] = 0.4810000000000000E+03;
    EPS[ 16] = 0.7140000000000001E+02;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetSIG EGTRANSETSIG
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetSIG egtransetsig
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetSIG egtransetsig_
#endif
/* Lennard-Jones collision diameters (GET_CRITPARAMS scales SIG[k] by 1e-8,
 * i.e. Angstrom -> cm). Only indices 0..16 are filled. */
void egtransetSIG(double* SIG) {
    SIG[ 0] = 0.2920000000000000E+01;
    SIG[ 1] = 0.2050000000000000E+01;
    SIG[ 2] = 0.3458000000000000E+01;
    SIG[ 3] = 0.2750000000000000E+01;
    SIG[ 4] = 0.2605000000000000E+01;
    SIG[ 5] = 0.3458000000000000E+01;
    SIG[ 6] = 0.3458000000000000E+01;
    SIG[ 7] = 0.3800000000000000E+01;
    SIG[ 8] = 0.3746000000000000E+01;
    SIG[ 9] = 0.3650000000000000E+01;
    SIG[ 10] = 0.3763000000000000E+01;
    SIG[ 11] = 0.3590000000000000E+01;
    SIG[ 12] = 0.4100000000000000E+01;
    SIG[ 13] = 0.3971000000000000E+01;
    SIG[ 14] = 0.4302000000000000E+01;
    SIG[ 15] = 0.2920000000000000E+01;
    SIG[ 16] = 0.3298000000000000E+01;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetDIP EGTRANSETDIP
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetDIP egtransetdip
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetDIP egtransetdip_
#endif
/* Dipole moments; nonzero only for the polar species at indices 4 and 15
 * (H2O and NH3 in the transport ordering -- confirm ordering). */
void egtransetDIP(double* DIP) {
    DIP[ 0] = 0.0000000000000000E+00;
    DIP[ 1] = 0.0000000000000000E+00;
    DIP[ 2] = 0.0000000000000000E+00;
    DIP[ 3] = 0.0000000000000000E+00;
    DIP[ 4] = 0.1844000000000000E+01;
    DIP[ 5] = 0.0000000000000000E+00;
    DIP[ 6] = 0.0000000000000000E+00;
    DIP[ 7] = 0.0000000000000000E+00;
    DIP[ 8] = 0.0000000000000000E+00;
    DIP[ 9] = 0.0000000000000000E+00;
    DIP[ 10] = 0.0000000000000000E+00;
    DIP[ 11] = 0.0000000000000000E+00;
    DIP[ 12] = 0.0000000000000000E+00;
    DIP[ 13] = 0.0000000000000000E+00;
    DIP[ 14] = 0.0000000000000000E+00;
    DIP[ 15] = 0.1470000000000000E+01;
    DIP[ 16] = 0.0000000000000000E+00;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetPOL EGTRANSETPOL
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetPOL egtransetpol
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetPOL egtransetpol_
#endif
/* Polarizabilities; zero for most species. */
void egtransetPOL(double* POL) {
    POL[ 0] = 0.7900000000000000E+00;
    POL[ 1] = 0.0000000000000000E+00;
    POL[ 2] = 0.1600000000000000E+01;
    POL[ 3] = 0.0000000000000000E+00;
    POL[ 4] = 0.0000000000000000E+00;
    POL[ 5] = 0.0000000000000000E+00;
    POL[ 6] = 0.0000000000000000E+00;
    POL[ 7] = 0.0000000000000000E+00;
    POL[ 8] = 0.2600000000000000E+01;
    POL[ 9] = 0.1950000000000000E+01;
    POL[ 10] = 0.2650000000000000E+01;
    POL[ 11] = 0.0000000000000000E+00;
    POL[ 12] = 0.0000000000000000E+00;
    POL[ 13] = 0.0000000000000000E+00;
    POL[ 14] = 0.0000000000000000E+00;
    POL[ 15] = 0.0000000000000000E+00;
    POL[ 16] = 0.0000000000000000E+00;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetZROT EGTRANSETZROT
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetZROT egtransetzrot
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetZROT egtransetzrot_
#endif
/* Rotational relaxation collision numbers; zero for the atomic species. */
void egtransetZROT(double* ZROT) {
    ZROT[ 0] = 0.2800000000000000E+03;
    ZROT[ 1] = 0.0000000000000000E+00;
    ZROT[ 2] = 0.3800000000000000E+01;
    ZROT[ 3] = 0.0000000000000000E+00;
    ZROT[ 4] = 0.4000000000000000E+01;
    ZROT[ 5] = 0.1000000000000000E+01;
    ZROT[ 6] = 0.3800000000000000E+01;
    ZROT[ 7] = 0.0000000000000000E+00;
    ZROT[ 8] = 0.1300000000000000E+02;
    ZROT[ 9] = 0.1800000000000000E+01;
    ZROT[ 10] = 0.2100000000000000E+01;
    ZROT[ 11] = 0.2000000000000000E+01;
    ZROT[ 12] = 0.2500000000000000E+01;
    ZROT[ 13] = 0.1500000000000000E+01;
    ZROT[ 14] = 0.1500000000000000E+01;
    ZROT[ 15] = 0.1000000000000000E+02;
    ZROT[ 16] = 0.0000000000000000E+00;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetNLIN EGTRANSETNLIN
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetNLIN egtransetnlin
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetNLIN egtransetnlin_
#endif
/* Molecular geometry flags (presumably 0 = atom, 1 = linear, 2 = nonlinear,
 * per CHEMKIN transport convention -- confirm). */
void egtransetNLIN(int* NLIN) {
    NLIN[ 0] = 1;
    NLIN[ 1] = 0;
    NLIN[ 2] = 1;
    NLIN[ 3] = 1;
    NLIN[ 4] = 2;
    NLIN[ 5] = 2;
    NLIN[ 6] = 2;
    NLIN[ 7] = 1;
    NLIN[ 8] = 2;
    NLIN[ 9] = 1;
    NLIN[ 10] = 1;
    NLIN[ 11] = 2;
    NLIN[ 12] = 1;
    NLIN[ 13] = 2;
    NLIN[ 14] = 2;
    NLIN[ 15] = 2;
    NLIN[ 16] = 0;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFLAM EGTRANSETCOFLAM
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFLAM egtransetcoflam
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFLAM egtransetcoflam_
#endif
/* Thermal conductivity fit coefficients: 4 per species (see egtransetNO),
 * species k occupies COFLAM[4k..4k+3]. */
void egtransetCOFLAM(double* COFLAM) {
    COFLAM[ 0] = 0.4338938909590841E+01;
    COFLAM[ 1] = 0.1557129190255232E+01;
    COFLAM[ 2] = -0.1611482772000911E+00;
    COFLAM[ 3] = 0.9924831871819461E-02;
    COFLAM[ 4] = -0.1277787249296346E+01;
    COFLAM[ 5] = 0.3820054734917150E+01;
    COFLAM[ 6] = -0.4198233798779158E+00;
    COFLAM[ 7] = 0.1850208398735055E-01;
    COFLAM[ 8] = 0.5325069412912258E+00;
    COFLAM[ 9] = 0.1869914528199901E+01;
    COFLAM[ 10] = -0.1314362832820327E+00;
    COFLAM[ 11] = 0.5215120926063267E-02;
    COFLAM[ 12] = 0.9093696928027398E+01;
    COFLAM[ 13] = -0.1120775592691738E+01;
    COFLAM[ 14] = 0.2389873914448641E+00;
    COFLAM[ 15] = -0.9739555323602451E-02;
    COFLAM[ 16] = 0.1823299080601272E+02;
    COFLAM[ 17] = -0.6774533358601863E+01;
    COFLAM[ 18] = 0.1218719282523822E+01;
    COFLAM[ 19] = -0.6135010154070134E-01;
    COFLAM[ 20] = 0.3326317596296849E+01;
    COFLAM[ 21] = 0.4132335554940667E+00;
    COFLAM[ 22] = 0.1130570823333984E+00;
    COFLAM[ 23] = -0.7338162598088820E-02;
    COFLAM[ 24] = 0.2915877425653172E+01;
    COFLAM[ 25] = 0.4587330741194222E+00;
    COFLAM[ 26] = 0.1387858388240978E+00;
    COFLAM[ 27] = -0.9951989822150117E-02;
    COFLAM[ 28] = 0.1076771824317687E+02;
    COFLAM[ 29] = -0.3230785742845311E+01;
    COFLAM[ 30] = 0.7028613656761165E+00;
    COFLAM[ 31] = -0.3786794222679780E-01;
    COFLAM[ 32] = 0.1770109820287010E+02;
    COFLAM[ 33] = -0.6715184244214581E+01;
    COFLAM[ 34] = 0.1263352812473795E+01;
    COFLAM[ 35] =
-0.6629210623948648E-01;
    COFLAM[ 36] = 0.8169761711928375E+01;
    COFLAM[ 37] = -0.1536354590024332E+01;
    COFLAM[ 38] = 0.3677852316201770E+00;
    COFLAM[ 39] = -0.1908151636745373E-01;
    COFLAM[ 40] = -0.8741249063434607E+01;
    COFLAM[ 41] = 0.4789682737843632E+01;
    COFLAM[ 42] = -0.4182583551792970E+00;
    COFLAM[ 43] = 0.1350148479415579E-01;
    COFLAM[ 44] = 0.1432241064124226E+02;
    COFLAM[ 45] = -0.6063127607975618E+01;
    COFLAM[ 46] = 0.1238070730356745E+01;
    COFLAM[ 47] = -0.6822324216312974E-01;
    COFLAM[ 48] = -0.1080654352351750E+02;
    COFLAM[ 49] = 0.5871580561001701E+01;
    COFLAM[ 50] = -0.5858009165203976E+00;
    COFLAM[ 51] = 0.2242112198055643E-01;
    COFLAM[ 52] = -0.4034386834241054E+01;
    COFLAM[ 53] = 0.1906845175798028E+01;
    COFLAM[ 54] = 0.1176313823060724E+00;
    COFLAM[ 55] = -0.1610854731203796E-01;
    COFLAM[ 56] = -0.2116070664089148E+01;
    COFLAM[ 57] = 0.9961322379018972E+00;
    COFLAM[ 58] = 0.2612385762947667E+00;
    COFLAM[ 59] = -0.2335852593974065E-01;
    COFLAM[ 60] = 0.1756249168236376E+02;
    COFLAM[ 61] = -0.6844841344645484E+01;
    COFLAM[ 62] = 0.1296574793469918E+01;
    COFLAM[ 63] = -0.6830286748937907E-01;
    COFLAM[ 64] = 0.1334087340220970E+01;
    COFLAM[ 65] = 0.1967564865885963E+01;
    COFLAM[ 66] = -0.1801563268324271E+00;
    COFLAM[ 67] = 0.8161858212342093E-02;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFETA EGTRANSETCOFETA
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFETA egtransetcofeta
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFETA egtransetcofeta_
#endif
/* Viscosity fit coefficients: 4 per species, species k occupies
 * COFETA[4k..4k+3]. */
void egtransetCOFETA(double* COFETA) {
    COFETA[ 0] = -0.1405365243967108E+02;
    COFETA[ 1] = 0.1093067315260161E+01;
    COFETA[ 2] = -0.6261814846952836E-01;
    COFETA[ 3] = 0.2905012497485487E-02;
    COFETA[ 4] = -0.2082770253853309E+02;
    COFETA[ 5] = 0.3820054734917064E+01;
    COFETA[ 6] = -0.4198233798779034E+00;
    COFETA[ 7] = 0.1850208398734995E-01;
    COFETA[ 8] = -0.1788787183918792E+02;
    COFETA[ 9] = 0.2980942390384383E+01;
    COFETA[ 10] = -0.3137212871769931E+00;
    COFETA[ 11] = 0.1402845871347659E-01;
    COFETA[ 12] = -0.1575591740228496E+02;
    COFETA[ 13] = 0.2214372539700379E+01;
    COFETA[ 14] = -0.2131246920584266E+00;
    COFETA[ 15] = 0.9629058498016490E-02;
    COFETA[ 16] = -0.1132071455875007E+02;
    COFETA[ 17] = -0.1024187591396996E+01;
    COFETA[ 18] = 0.3672551958155785E+00;
    COFETA[ 19] = -0.2140495811549247E-01;
    COFETA[ 20] = -0.1787236469476987E+02;
    COFETA[ 21] = 0.2980942390384401E+01;
    COFETA[ 22] = -0.3137212871769956E+00;
    COFETA[ 23] = 0.1402845871347671E-01;
    COFETA[ 24] = -0.1785732406117402E+02;
    COFETA[ 25] = 0.2980942390384431E+01;
    COFETA[ 26] = -0.3137212871770002E+00;
    COFETA[ 27] = 0.1402845871347693E-01;
    COFETA[ 28] = -0.2064817961278261E+02;
    COFETA[ 29] = 0.3796664365547116E+01;
    COFETA[ 30] = -0.4168591838616045E+00;
    COFETA[ 31] = 0.1837669926760990E-01;
    COFETA[ 32] = -0.2044795093120063E+02;
    COFETA[ 33] = 0.3746140041781780E+01;
    COFETA[ 34] = -0.4106325045512437E+00;
    COFETA[ 35] = 0.1812122592865459E-01;
    COFETA[ 36] = -0.1745394886003354E+02;
    COFETA[ 37] = 0.2749779734238378E+01;
    COFETA[ 38] = -0.2838005922559860E+00;
    COFETA[ 39] = 0.1273750763474062E-01;
    COFETA[ 40] = -0.2279534579079101E+02;
    COFETA[ 41] = 0.4622798720286126E+01;
    COFETA[ 42] = -0.4997444888565643E+00;
    COFETA[ 43] = 0.2095793339101399E-01;
    COFETA[ 44] = -0.1574565551467780E+02;
    COFETA[ 45] = 0.9879045622370226E+00;
    COFETA[ 46] = 0.7005046117242672E-01;
    COFETA[ 47] = -0.7652880399373136E-02;
    COFETA[ 48] = -0.2285054221476539E+02;
    COFETA[ 49] = 0.4573684124304381E+01;
    COFETA[ 50] = -0.5044627764440257E+00;
    COFETA[ 51] = 0.2162047229656920E-01;
    COFETA[ 52] = -0.2294894266555372E+02;
    COFETA[ 53] = 0.4436068808351478E+01;
    COFETA[ 54] = -0.4620857825400307E+00;
    COFETA[ 55] = 0.1877687773259413E-01;
    COFETA[ 56] = -0.2324874285445497E+02;
    COFETA[ 57] = 0.4594844249509825E+01;
    COFETA[ 58] = -0.4931221039570846E+00;
    COFETA[ 59] = 0.2054776726722393E-01;
    COFETA[ 60] = -0.1419898917157052E+02;
    COFETA[ 61] = 0.3629587083500767E+00;
    COFETA[ 62] = 0.1568120182105773E+00;
    COFETA[ 63] = -0.1151267609047790E-01;
    COFETA[ 64] = -0.1558423057668358E+02;
    COFETA[ 65] = 0.1967564865886029E+01;
    COFETA[ 66] = -0.1801563268324368E+00;
    COFETA[ 67] = 0.8161858212342565E-02;
};

#if defined(BL_FORT_USE_UPPERCASE)
#define egtransetCOFD EGTRANSETCOFD
#elif defined(BL_FORT_USE_LOWERCASE)
#define egtransetCOFD egtransetcofd
#elif defined(BL_FORT_USE_UNDERSCORE)
#define egtransetCOFD egtransetcofd_
#endif
/* Binary diffusion coefficient fits: 4 coefficients per (j,k) species pair.
 * The table continues beyond this chunk of the file. */
void egtransetCOFD(double* COFD) {
    COFD[ 0] = -0.1041995820481732E+02;
    COFD[ 1] = 0.2234441574336671E+01;
    COFD[ 2] = -0.8113717628974293E-01;
    COFD[ 3] = 0.3771369728593280E-02;
    COFD[ 4] = -0.1246996149641938E+02;
    COFD[ 5] = 0.3218029211436469E+01;
    COFD[ 6] = -0.2110489360993846E+00;
    COFD[ 7] = 0.9480877224339204E-02;
    COFD[ 8] = -0.1309173178181482E+02;
    COFD[ 9] = 0.3085278377844914E+01;
    COFD[ 10] = -0.1946824097426824E+00;
    COFD[ 11] = 0.8807381240537916E-02;
    COFD[ 12] = -0.1175032758512814E+02;
    COFD[ 13] = 0.2642045353939402E+01;
    COFD[ 14] = -0.1332637450759486E+00;
    COFD[ 15] = 0.5973546569611733E-02;
    COFD[ 16] = -0.1757366663670086E+02;
    COFD[ 17] = 0.4778618797909657E+01;
    COFD[ 18] = -0.3996041319366184E+00;
    COFD[ 19] = 0.1705904378841811E-01;
    COFD[ 20] = -0.1309613046501959E+02;
    COFD[ 21] = 0.3086801219615145E+01;
    COFD[ 22] = -0.1948957472387874E+00;
    COFD[ 23] = 0.8817294833761086E-02;
    COFD[ 24] = -0.1310029609771552E+02;
    COFD[ 25] = 0.3088245187916686E+01;
    COFD[ 26] = -0.1950980328845662E+00;
    COFD[ 27] = 0.8826694775135658E-02;
    COFD[ 28] = -0.1401582227027964E+02;
    COFD[ 29] = 0.3415426664334749E+01;
    COFD[ 30] = -0.2385853115052575E+00;
    COFD[ 31] = 0.1075469512610122E-01;
    COFD[ 32] = -0.1394568249588876E+02;
    COFD[ 33] = 0.3392701225291731E+01;
    COFD[ 34] = -0.2355263017693934E+00;
    COFD[ 35] = 0.1061747669734484E-01;
    COFD[ 36] = -0.1288505703505633E+02;
    COFD[ 37] = 0.2980129383938292E+01;
    COFD[ 38] = -0.1801545917674105E+00;
    COFD[ 39] = 0.8138117217492757E-02;
    COFD[ 40] = -0.1558361674716745E+02;
    COFD[ 41] = 0.3987098568284434E+01;
    COFD[ 42] = -0.3109866036899985E+00;
    COFD[ 43] =
0.1380795341338578E-01; COFD[ 44] = -0.1688564690991911E+02; COFD[ 45] = 0.4415865473479337E+01; COFD[ 46] = -0.3564104485793251E+00; COFD[ 47] = 0.1533855847922275E-01; COFD[ 48] = -0.1548207029093904E+02; COFD[ 49] = 0.3939084188623417E+01; COFD[ 50] = -0.3072761108436687E+00; COFD[ 51] = 0.1375770270895890E-01; COFD[ 52] = -0.1606152931877502E+02; COFD[ 53] = 0.4146430359164761E+01; COFD[ 54] = -0.3312210923439469E+00; COFD[ 55] = 0.1466430884967795E-01; COFD[ 56] = -0.1596236229610858E+02; COFD[ 57] = 0.4079842750882008E+01; COFD[ 58] = -0.3231960602071978E+00; COFD[ 59] = 0.1434305777680907E-01; COFD[ 60] = -0.1682806034335371E+02; COFD[ 61] = 0.4485450500929320E+01; COFD[ 62] = -0.3655695289846634E+00; COFD[ 63] = 0.1574614737371121E-01; COFD[ 64] = -0.1163276178298392E+02; COFD[ 65] = 0.2530205071288130E+01; COFD[ 66] = -0.1183776350934922E+00; COFD[ 67] = 0.5312988352557955E-02; COFD[ 68] = -0.1246996149641938E+02; COFD[ 69] = 0.3218029211436469E+01; COFD[ 70] = -0.2110489360993846E+00; COFD[ 71] = 0.9480877224339204E-02; COFD[ 72] = -0.1523325703608638E+02; COFD[ 73] = 0.4393426193693418E+01; COFD[ 74] = -0.3557313428561681E+00; COFD[ 75] = 0.1541554803417400E-01; COFD[ 76] = -0.1615679289714481E+02; COFD[ 77] = 0.4416835307133869E+01; COFD[ 78] = -0.3582737234170646E+00; COFD[ 79] = 0.1548663572743747E-01; COFD[ 80] = -0.1489056810719442E+02; COFD[ 81] = 0.4079672697171075E+01; COFD[ 82] = -0.3197193422647769E+00; COFD[ 83] = 0.1404463618351796E-01; COFD[ 84] = -0.1615075966577466E+02; COFD[ 85] = 0.4076548482999812E+01; COFD[ 86] = -0.2637510941604614E+00; COFD[ 87] = 0.9371841080052064E-02; COFD[ 88] = -0.1616058613782880E+02; COFD[ 89] = 0.4418087193226073E+01; COFD[ 90] = -0.3584223903851448E+00; COFD[ 91] = 0.1549243249303281E-01; COFD[ 92] = -0.1616421015670871E+02; COFD[ 93] = 0.4419285820914456E+01; COFD[ 94] = -0.3585647608630650E+00; COFD[ 95] = 0.1549798503551403E-01; COFD[ 96] = -0.1668220221378553E+02; COFD[ 97] = 0.4517665522196412E+01; 
COFD[ 98] = -0.3643177404654742E+00; COFD[ 99] = 0.1544861119982853E-01; COFD[ 100] = -0.1661124059439949E+02; COFD[ 101] = 0.4499983815327449E+01; COFD[ 102] = -0.3624203284397300E+00; COFD[ 103] = 0.1538350856590451E-01; COFD[ 104] = -0.1640877269708571E+02; COFD[ 105] = 0.4519498926662536E+01; COFD[ 106] = -0.3750329779403845E+00; COFD[ 107] = 0.1635855421757920E-01; COFD[ 108] = -0.1664237108262573E+02; COFD[ 109] = 0.4321207774376617E+01; COFD[ 110] = -0.3181315549506706E+00; COFD[ 111] = 0.1258308183870717E-01; COFD[ 112] = -0.1567567878102999E+02; COFD[ 113] = 0.3706302510623138E+01; COFD[ 114] = -0.2096236244277931E+00; COFD[ 115] = 0.6748353057110522E-02; COFD[ 116] = -0.1685361380845761E+02; COFD[ 117] = 0.4411251776453823E+01; COFD[ 118] = -0.3350756131548513E+00; COFD[ 119] = 0.1353157937321782E-01; COFD[ 120] = -0.1635823810097287E+02; COFD[ 121] = 0.4123977007004703E+01; COFD[ 122] = -0.2854728908229589E+00; COFD[ 123] = 0.1088433914065527E-01; COFD[ 124] = -0.1646195534760534E+02; COFD[ 125] = 0.4141822899037704E+01; COFD[ 126] = -0.2892308013569180E+00; COFD[ 127] = 0.1109614462742981E-01; COFD[ 128] = -0.1626359848098758E+02; COFD[ 129] = 0.4116788683843708E+01; COFD[ 130] = -0.2733289330957227E+00; COFD[ 131] = 0.9943035956817482E-02; COFD[ 132] = -0.1522605770447405E+02; COFD[ 133] = 0.4154863253653862E+01; COFD[ 134] = -0.3321404775716639E+00; COFD[ 135] = 0.1469592836326692E-01; COFD[ 136] = -0.1309173178181482E+02; COFD[ 137] = 0.3085278377844914E+01; COFD[ 138] = -0.1946824097426824E+00; COFD[ 139] = 0.8807381240537916E-02; COFD[ 140] = -0.1615679289714481E+02; COFD[ 141] = 0.4416835307133869E+01; COFD[ 142] = -0.3582737234170646E+00; COFD[ 143] = 0.1548663572743747E-01; COFD[ 144] = -0.1633859102926806E+02; COFD[ 145] = 0.3804061718963007E+01; COFD[ 146] = -0.2844545503177112E+00; COFD[ 147] = 0.1254577088876504E-01; COFD[ 148] = -0.1549441379638191E+02; COFD[ 149] = 0.3661937775617350E+01; COFD[ 150] = -0.2686571425887965E+00; COFD[ 151] = 
0.1197472750180553E-01; COFD[ 152] = -0.1922081039841695E+02; COFD[ 153] = 0.4711535347815869E+01; COFD[ 154] = -0.3619579159000997E+00; COFD[ 155] = 0.1426886476331241E-01; COFD[ 156] = -0.1634649054694938E+02; COFD[ 157] = 0.3804144246517877E+01; COFD[ 158] = -0.2844651891354975E+00; COFD[ 159] = 0.1254622626209081E-01; COFD[ 160] = -0.1635443062264980E+02; COFD[ 161] = 0.3804381702146611E+01; COFD[ 162] = -0.2844957999638338E+00; COFD[ 163] = 0.1254753649276844E-01; COFD[ 164] = -0.1710012222365139E+02; COFD[ 165] = 0.4123808736773267E+01; COFD[ 166] = -0.3236376370297240E+00; COFD[ 167] = 0.1414381001550686E-01; COFD[ 168] = -0.1705130515010214E+02; COFD[ 169] = 0.4103994885241071E+01; COFD[ 170] = -0.3212499365446575E+00; COFD[ 171] = 0.1404820978963865E-01; COFD[ 172] = -0.1615768146554276E+02; COFD[ 173] = 0.3734150883385773E+01; COFD[ 174] = -0.2760989842105895E+00; COFD[ 175] = 0.1221438663500826E-01; COFD[ 176] = -0.1871735010457793E+02; COFD[ 177] = 0.4573421885700214E+01; COFD[ 178] = -0.3757214086933051E+00; COFD[ 179] = 0.1614451502251515E-01; COFD[ 180] = -0.1977364410044468E+02; COFD[ 181] = 0.4876531592391508E+01; COFD[ 182] = -0.3981741984755758E+00; COFD[ 183] = 0.1643476290195535E-01; COFD[ 184] = -0.1830650296367939E+02; COFD[ 185] = 0.4452394507404697E+01; COFD[ 186] = -0.3624991200767191E+00; COFD[ 187] = 0.1567286266089603E-01; COFD[ 188] = -0.1904759289384530E+02; COFD[ 189] = 0.4700929573683383E+01; COFD[ 190] = -0.3903608191209737E+00; COFD[ 191] = 0.1670229359855150E-01; COFD[ 192] = -0.1883799365221673E+02; COFD[ 193] = 0.4594611640098751E+01; COFD[ 194] = -0.3781168670160998E+00; COFD[ 195] = 0.1623467419613562E-01; COFD[ 196] = -0.1934710272078301E+02; COFD[ 197] = 0.4822779400529180E+01; COFD[ 198] = -0.3871228449439424E+00; COFD[ 199] = 0.1578686341252481E-01; COFD[ 200] = -0.1527049784849730E+02; COFD[ 201] = 0.3538365317994180E+01; COFD[ 202] = -0.2526814234121028E+00; COFD[ 203] = 0.1128573168209360E-01; COFD[ 204] = 
-0.1175032758512814E+02; COFD[ 205] = 0.2642045353939402E+01; COFD[ 206] = -0.1332637450759486E+00; COFD[ 207] = 0.5973546569611733E-02; COFD[ 208] = -0.1489056810719442E+02; COFD[ 209] = 0.4079672697171075E+01; COFD[ 210] = -0.3197193422647769E+00; COFD[ 211] = 0.1404463618351796E-01; COFD[ 212] = -0.1549441379638191E+02; COFD[ 213] = 0.3661937775617350E+01; COFD[ 214] = -0.2686571425887965E+00; COFD[ 215] = 0.1197472750180553E-01; COFD[ 216] = -0.1408727594805337E+02; COFD[ 217] = 0.3263692027745076E+01; COFD[ 218] = -0.2164586669743292E+00; COFD[ 219] = 0.9693817632062094E-02; COFD[ 220] = -0.1876923157317771E+02; COFD[ 221] = 0.4853854184816717E+01; COFD[ 222] = -0.3998497982364926E+00; COFD[ 223] = 0.1669227468405473E-01; COFD[ 224] = -0.1551249966394157E+02; COFD[ 225] = 0.3667301435894143E+01; COFD[ 226] = -0.2693892845913942E+00; COFD[ 227] = 0.1200791993248851E-01; COFD[ 228] = -0.1553013778710249E+02; COFD[ 229] = 0.3672587405633829E+01; COFD[ 230] = -0.2701107938194129E+00; COFD[ 231] = 0.1204062933869048E-01; COFD[ 232] = -0.1596628118290846E+02; COFD[ 233] = 0.3841273000042274E+01; COFD[ 234] = -0.2898297027626218E+00; COFD[ 235] = 0.1280193859902638E-01; COFD[ 236] = -0.1591206444980951E+02; COFD[ 237] = 0.3820917505869285E+01; COFD[ 238] = -0.2872227297192593E+00; COFD[ 239] = 0.1269037691099738E-01; COFD[ 240] = -0.1524511379647520E+02; COFD[ 241] = 0.3553053597674808E+01; COFD[ 242] = -0.2545212965294182E+00; COFD[ 243] = 0.1136271070888101E-01; COFD[ 244] = -0.1768659705517047E+02; COFD[ 245] = 0.4367905899188474E+01; COFD[ 246] = -0.3516645879639785E+00; COFD[ 247] = 0.1520186726409712E-01; COFD[ 248] = -0.1891518602118661E+02; COFD[ 249] = 0.4756739681751712E+01; COFD[ 250] = -0.3886685070061102E+00; COFD[ 251] = 0.1625748441332711E-01; COFD[ 252] = -0.1730907989620245E+02; COFD[ 253] = 0.4243749180870270E+01; COFD[ 254] = -0.3387466685616417E+00; COFD[ 255] = 0.1477905643158847E-01; COFD[ 256] = -0.1795985745949001E+02; COFD[ 257] = 
0.4461091096847464E+01; COFD[ 258] = -0.3624212969270659E+00; COFD[ 259] = 0.1561553586311572E-01; COFD[ 260] = -0.1782798069130013E+02; COFD[ 261] = 0.4381516462622495E+01; COFD[ 262] = -0.3532970918753254E+00; COFD[ 263] = 0.1526804966457564E-01; COFD[ 264] = -0.1863707722655403E+02; COFD[ 265] = 0.4806992114833483E+01; COFD[ 266] = -0.3984196894018535E+00; COFD[ 267] = 0.1681856268295304E-01; COFD[ 268] = -0.1395220381010925E+02; COFD[ 269] = 0.3159688299904867E+01; COFD[ 270] = -0.2028455188834762E+00; COFD[ 271] = 0.9099190221367019E-02; COFD[ 272] = -0.1757366663670086E+02; COFD[ 273] = 0.4778618797909657E+01; COFD[ 274] = -0.3996041319366184E+00; COFD[ 275] = 0.1705904378841811E-01; COFD[ 276] = -0.1615075966577466E+02; COFD[ 277] = 0.4076548482999812E+01; COFD[ 278] = -0.2637510941604614E+00; COFD[ 279] = 0.9371841080052064E-02; COFD[ 280] = -0.1922081039841695E+02; COFD[ 281] = 0.4711535347815869E+01; COFD[ 282] = -0.3619579159000997E+00; COFD[ 283] = 0.1426886476331241E-01; COFD[ 284] = -0.1876923157317771E+02; COFD[ 285] = 0.4853854184816717E+01; COFD[ 286] = -0.3998497982364926E+00; COFD[ 287] = 0.1669227468405473E-01; COFD[ 288] = -0.1159394277352764E+02; COFD[ 289] = 0.8274415074083117E+00; COFD[ 290] = 0.2507810870995229E+00; COFD[ 291] = -0.1608920912964349E-01; COFD[ 292] = -0.1918278409127273E+02; COFD[ 293] = 0.4757442212360504E+01; COFD[ 294] = -0.3754949936858373E+00; COFD[ 295] = 0.1515864657024127E-01; COFD[ 296] = -0.1917291211841810E+02; COFD[ 297] = 0.4750317181782021E+01; COFD[ 298] = -0.3743997734248102E+00; COFD[ 299] = 0.1510405742415517E-01; COFD[ 300] = -0.1924307146615256E+02; COFD[ 301] = 0.4719749979653547E+01; COFD[ 302] = -0.3616169791891675E+00; COFD[ 303] = 0.1420601032870719E-01; COFD[ 304] = -0.1899850780979201E+02; COFD[ 305] = 0.4543165455349725E+01; COFD[ 306] = -0.3288657155661528E+00; COFD[ 307] = 0.1239537851241695E-01; COFD[ 308] = -0.1935427072301072E+02; COFD[ 309] = 0.4779618521455131E+01; COFD[ 310] = 
-0.3741503724582112E+00; COFD[ 311] = 0.1493019768764606E-01; COFD[ 312] = -0.1719414340129819E+02; COFD[ 313] = 0.3490298819499314E+01; COFD[ 314] = -0.1625329575445097E+00; COFD[ 315] = 0.4043757780510787E-02; COFD[ 316] = -0.1468058225757294E+02; COFD[ 317] = 0.2298576535112391E+01; COFD[ 318] = 0.1810975986268222E-01; COFD[ 319] = -0.4731609614535192E-02; COFD[ 320] = -0.1868831150678477E+02; COFD[ 321] = 0.4247585976166683E+01; COFD[ 322] = -0.2815735628073077E+00; COFD[ 323] = 0.1000602366593966E-01; COFD[ 324] = -0.1767910948230725E+02; COFD[ 325] = 0.3722086580781685E+01; COFD[ 326] = -0.1980254146986777E+00; COFD[ 327] = 0.5799038224028695E-02; COFD[ 328] = -0.1813805288510309E+02; COFD[ 329] = 0.3907124675584541E+01; COFD[ 330] = -0.2273991439446390E+00; COFD[ 331] = 0.7280601459305962E-02; COFD[ 332] = -0.1316791411191276E+02; COFD[ 333] = 0.1635596276910710E+01; COFD[ 334] = 0.1248562922809231E+00; COFD[ 335] = -0.1002143991895203E-01; COFD[ 336] = -0.1875637020961290E+02; COFD[ 337] = 0.4821872691746870E+01; COFD[ 338] = -0.3987313751514457E+00; COFD[ 339] = 0.1676685214877852E-01; COFD[ 340] = -0.1309613046501959E+02; COFD[ 341] = 0.3086801219615145E+01; COFD[ 342] = -0.1948957472387874E+00; COFD[ 343] = 0.8817294833761086E-02; COFD[ 344] = -0.1616058613782880E+02; COFD[ 345] = 0.4418087193226073E+01; COFD[ 346] = -0.3584223903851448E+00; COFD[ 347] = 0.1549243249303281E-01; COFD[ 348] = -0.1634649054694938E+02; COFD[ 349] = 0.3804144246517877E+01; COFD[ 350] = -0.2844651891354975E+00; COFD[ 351] = 0.1254622626209081E-01; COFD[ 352] = -0.1551249966394157E+02; COFD[ 353] = 0.3667301435894143E+01; COFD[ 354] = -0.2693892845913942E+00; COFD[ 355] = 0.1200791993248851E-01; COFD[ 356] = -0.1918278409127273E+02; COFD[ 357] = 0.4757442212360504E+01; COFD[ 358] = -0.3754949936858373E+00; COFD[ 359] = 0.1515864657024127E-01; COFD[ 360] = -0.1635409817368614E+02; COFD[ 361] = 0.3804061718963002E+01; COFD[ 362] = -0.2844545503177105E+00; COFD[ 363] = 
0.1254577088876500E-01; COFD[ 364] = -0.1636175579013954E+02; COFD[ 365] = 0.3804139357106159E+01; COFD[ 366] = -0.2844645588307625E+00; COFD[ 367] = 0.1254619928317991E-01; COFD[ 368] = -0.1710991586467964E+02; COFD[ 369] = 0.4125646326528441E+01; COFD[ 370] = -0.3238554740258663E+00; COFD[ 371] = 0.1415227419625766E-01; COFD[ 372] = -0.1706120101779605E+02; COFD[ 373] = 0.4105791991868936E+01; COFD[ 374] = -0.3214635787029052E+00; COFD[ 375] = 0.1405654084975138E-01; COFD[ 376] = -0.1616672146196606E+02; COFD[ 377] = 0.3734919571093601E+01; COFD[ 378] = -0.2762018861637152E+00; COFD[ 379] = 0.1221896274296108E-01; COFD[ 380] = -0.1872670165824808E+02; COFD[ 381] = 0.4573903479896379E+01; COFD[ 382] = -0.3758340786206700E+00; COFD[ 383] = 0.1615161324401359E-01; COFD[ 384] = -0.1978049689264223E+02; COFD[ 385] = 0.4876281709421428E+01; COFD[ 386] = -0.3981401304053240E+00; COFD[ 387] = 0.1643325343977014E-01; COFD[ 388] = -0.1831322697803230E+02; COFD[ 389] = 0.4452261161272578E+01; COFD[ 390] = -0.3624715246876605E+00; COFD[ 391] = 0.1567121551911240E-01; COFD[ 392] = -0.1905419842832635E+02; COFD[ 393] = 0.4700662819460902E+01; COFD[ 394] = -0.3903206776226419E+00; COFD[ 395] = 0.1670032573134212E-01; COFD[ 396] = -0.1884529530697174E+02; COFD[ 397] = 0.4594634055023398E+01; COFD[ 398] = -0.3781333057516071E+00; COFD[ 399] = 0.1623599168682821E-01; COFD[ 400] = -0.1919185322703664E+02; COFD[ 401] = 0.4784033288807873E+01; COFD[ 402] = -0.3849459064100618E+00; COFD[ 403] = 0.1580648990064550E-01; COFD[ 404] = -0.1528586419146581E+02; COFD[ 405] = 0.3542872339374690E+01; COFD[ 406] = -0.2532972672017028E+00; COFD[ 407] = 0.1131367837559019E-01; COFD[ 408] = -0.1310029609771552E+02; COFD[ 409] = 0.3088245187916686E+01; COFD[ 410] = -0.1950980328845662E+00; COFD[ 411] = 0.8826694775135658E-02; COFD[ 412] = -0.1616421015670871E+02; COFD[ 413] = 0.4419285820914456E+01; COFD[ 414] = -0.3585647608630650E+00; COFD[ 415] = 0.1549798503551403E-01; COFD[ 416] = 
-0.1635443062264980E+02; COFD[ 417] = 0.3804381702146611E+01; COFD[ 418] = -0.2844957999638338E+00; COFD[ 419] = 0.1254753649276844E-01; COFD[ 420] = -0.1553013778710249E+02; COFD[ 421] = 0.3672587405633829E+01; COFD[ 422] = -0.2701107938194129E+00; COFD[ 423] = 0.1204062933869048E-01; COFD[ 424] = -0.1917291211841810E+02; COFD[ 425] = 0.4750317181782021E+01; COFD[ 426] = -0.3743997734248102E+00; COFD[ 427] = 0.1510405742415517E-01; COFD[ 428] = -0.1636175579013954E+02; COFD[ 429] = 0.3804139357106159E+01; COFD[ 430] = -0.2844645588307625E+00; COFD[ 431] = 0.1254619928317991E-01; COFD[ 432] = -0.1636913880728208E+02; COFD[ 433] = 0.3804061718963007E+01; COFD[ 434] = -0.2844545503177112E+00; COFD[ 435] = 0.1254577088876504E-01; COFD[ 436] = -0.1711940058927942E+02; COFD[ 437] = 0.4127460426680499E+01; COFD[ 438] = -0.3240705310922555E+00; COFD[ 439] = 0.1416063080699452E-01; COFD[ 440] = -0.1707080539569448E+02; COFD[ 441] = 0.4107575519112949E+01; COFD[ 442] = -0.3216756148320791E+00; COFD[ 443] = 0.1406480989264605E-01; COFD[ 444] = -0.1617584857259722E+02; COFD[ 445] = 0.3735860038531880E+01; COFD[ 446] = -0.2763277124196010E+00; COFD[ 447] = 0.1222455543181157E-01; COFD[ 448] = -0.1873560076291526E+02; COFD[ 449] = 0.4574325021189105E+01; COFD[ 450] = -0.3759338652876284E+00; COFD[ 451] = 0.1615792922437573E-01; COFD[ 452] = -0.1978644159772173E+02; COFD[ 453] = 0.4875758120324897E+01; COFD[ 454] = -0.3980631994813040E+00; COFD[ 455] = 0.1642957450530744E-01; COFD[ 456] = -0.1831968967523493E+02; COFD[ 457] = 0.4452134774757628E+01; COFD[ 458] = -0.3624420830935042E+00; COFD[ 459] = 0.1566936578675393E-01; COFD[ 460] = -0.1906036212420135E+02; COFD[ 461] = 0.4700323443044125E+01; COFD[ 462] = -0.3902665913786998E+00; COFD[ 463] = 0.1669755465043006E-01; COFD[ 464] = -0.1885218260686505E+02; COFD[ 465] = 0.4594596529989258E+01; COFD[ 466] = -0.3781371531513473E+00; COFD[ 467] = 0.1623655159949430E-01; COFD[ 468] = -0.1918517200292939E+02; COFD[ 469] = 
0.4778400021399422E+01; COFD[ 470] = -0.3840658172050402E+00; COFD[ 471] = 0.1576203256152660E-01; COFD[ 472] = -0.1530075071186979E+02; COFD[ 473] = 0.3547276678457322E+01; COFD[ 474] = -0.2538990508478524E+00; COFD[ 475] = 0.1134098602286849E-01; COFD[ 476] = -0.1401582227027964E+02; COFD[ 477] = 0.3415426664334749E+01; COFD[ 478] = -0.2385853115052575E+00; COFD[ 479] = 0.1075469512610122E-01; COFD[ 480] = -0.1668220221378553E+02; COFD[ 481] = 0.4517665522196412E+01; COFD[ 482] = -0.3643177404654742E+00; COFD[ 483] = 0.1544861119982853E-01; COFD[ 484] = -0.1710012222365139E+02; COFD[ 485] = 0.4123808736773267E+01; COFD[ 486] = -0.3236376370297240E+00; COFD[ 487] = 0.1414381001550686E-01; COFD[ 488] = -0.1596628118290846E+02; COFD[ 489] = 0.3841273000042274E+01; COFD[ 490] = -0.2898297027626218E+00; COFD[ 491] = 0.1280193859902638E-01; COFD[ 492] = -0.1924307146615256E+02; COFD[ 493] = 0.4719749979653547E+01; COFD[ 494] = -0.3616169791891675E+00; COFD[ 495] = 0.1420601032870719E-01; COFD[ 496] = -0.1710991586467964E+02; COFD[ 497] = 0.4125646326528441E+01; COFD[ 498] = -0.3238554740258663E+00; COFD[ 499] = 0.1415227419625766E-01; COFD[ 500] = -0.1711940058927942E+02; COFD[ 501] = 0.4127460426680499E+01; COFD[ 502] = -0.3240705310922555E+00; COFD[ 503] = 0.1416063080699452E-01; COFD[ 504] = -0.1776945810612460E+02; COFD[ 505] = 0.4375557614241441E+01; COFD[ 506] = -0.3535212257934470E+00; COFD[ 507] = 0.1532421336526862E-01; COFD[ 508] = -0.1770845512212784E+02; COFD[ 509] = 0.4353002298484032E+01; COFD[ 510] = -0.3507494100853503E+00; COFD[ 511] = 0.1521052013931193E-01; COFD[ 512] = -0.1695356550477676E+02; COFD[ 513] = 0.4068213336589554E+01; COFD[ 514] = -0.3177710408064736E+00; COFD[ 515] = 0.1394701157712223E-01; COFD[ 516] = -0.1913993542632300E+02; COFD[ 517] = 0.4730620081114692E+01; COFD[ 518] = -0.3883890489649657E+00; COFD[ 519] = 0.1637150502726411E-01; COFD[ 520] = -0.1958734834972716E+02; COFD[ 521] = 0.4743227463757541E+01; COFD[ 522] = 
-0.3686474607497347E+00; COFD[ 523] = 0.1466232449273593E-01; COFD[ 524] = -0.1892132288088230E+02; COFD[ 525] = 0.4686257416133524E+01; COFD[ 526] = -0.3876178620425971E+00; COFD[ 527] = 0.1654627973246352E-01; COFD[ 528] = -0.1934401530563503E+02; COFD[ 529] = 0.4788242539311199E+01; COFD[ 530] = -0.3933254249328604E+00; COFD[ 531] = 0.1648505821153991E-01; COFD[ 532] = -0.1926514266945471E+02; COFD[ 533] = 0.4743728688652060E+01; COFD[ 534] = -0.3899270700585402E+00; COFD[ 535] = 0.1643466452124135E-01; COFD[ 536] = -0.1944043258564383E+02; COFD[ 537] = 0.4828743160693618E+01; COFD[ 538] = -0.3828021863328961E+00; COFD[ 539] = 0.1540186987470838E-01; COFD[ 540] = -0.1573476318984406E+02; COFD[ 541] = 0.3712839423735899E+01; COFD[ 542] = -0.2734220687823376E+00; COFD[ 543] = 0.1210212757075583E-01; COFD[ 544] = -0.1394568249588876E+02; COFD[ 545] = 0.3392701225291731E+01; COFD[ 546] = -0.2355263017693934E+00; COFD[ 547] = 0.1061747669734484E-01; COFD[ 548] = -0.1661124059439949E+02; COFD[ 549] = 0.4499983815327449E+01; COFD[ 550] = -0.3624203284397300E+00; COFD[ 551] = 0.1538350856590451E-01; COFD[ 552] = -0.1705130515010214E+02; COFD[ 553] = 0.4103994885241071E+01; COFD[ 554] = -0.3212499365446575E+00; COFD[ 555] = 0.1404820978963865E-01; COFD[ 556] = -0.1591206444980951E+02; COFD[ 557] = 0.3820917505869285E+01; COFD[ 558] = -0.2872227297192593E+00; COFD[ 559] = 0.1269037691099738E-01; COFD[ 560] = -0.1899850780979201E+02; COFD[ 561] = 0.4543165455349725E+01; COFD[ 562] = -0.3288657155661528E+00; COFD[ 563] = 0.1239537851241695E-01; COFD[ 564] = -0.1706120101779605E+02; COFD[ 565] = 0.4105791991868936E+01; COFD[ 566] = -0.3214635787029052E+00; COFD[ 567] = 0.1405654084975138E-01; COFD[ 568] = -0.1707080539569448E+02; COFD[ 569] = 0.4107575519112949E+01; COFD[ 570] = -0.3216756148320791E+00; COFD[ 571] = 0.1406480989264605E-01; COFD[ 572] = -0.1770845512212784E+02; COFD[ 573] = 0.4353002298484032E+01; COFD[ 574] = -0.3507494100853503E+00; COFD[ 575] = 
0.1521052013931193E-01; COFD[ 576] = -0.1766523778708329E+02; COFD[ 577] = 0.4337952155636497E+01; COFD[ 578] = -0.3490538655747895E+00; COFD[ 579] = 0.1514793254733080E-01; COFD[ 580] = -0.1690087953694522E+02; COFD[ 581] = 0.4046945935955795E+01; COFD[ 582] = -0.3151742473102278E+00; COFD[ 583] = 0.1384162361172659E-01; COFD[ 584] = -0.1913011857829392E+02; COFD[ 585] = 0.4728539523227080E+01; COFD[ 586] = -0.3887605628634140E+00; COFD[ 587] = 0.1641451966125198E-01; COFD[ 588] = -0.1962180466510501E+02; COFD[ 589] = 0.4762356031601866E+01; COFD[ 590] = -0.3720986023672735E+00; COFD[ 591] = 0.1485121157223651E-01; COFD[ 592] = -0.1887963312848053E+02; COFD[ 593] = 0.4671422222663214E+01; COFD[ 594] = -0.3861362810478654E+00; COFD[ 595] = 0.1650068107616750E-01; COFD[ 596] = -0.1933004429336219E+02; COFD[ 597] = 0.4785693509382203E+01; COFD[ 598] = -0.3936420075604487E+00; COFD[ 599] = 0.1652573779841039E-01; COFD[ 600] = -0.1926485085357470E+02; COFD[ 601] = 0.4746666060969545E+01; COFD[ 602] = -0.3910387566978661E+00; COFD[ 603] = 0.1651352760050653E-01; COFD[ 604] = -0.1941730561245821E+02; COFD[ 605] = 0.4788578363438019E+01; COFD[ 606] = -0.3737994268951498E+00; COFD[ 607] = 0.1486050997611076E-01; COFD[ 608] = -0.1569001110440693E+02; COFD[ 609] = 0.3696584277188425E+01; COFD[ 610] = -0.2713867054547939E+00; COFD[ 611] = 0.1201712146112774E-01; COFD[ 612] = -0.1288505703505633E+02; COFD[ 613] = 0.2980129383938292E+01; COFD[ 614] = -0.1801545917674105E+00; COFD[ 615] = 0.8138117217492757E-02; COFD[ 616] = -0.1640877269708571E+02; COFD[ 617] = 0.4519498926662536E+01; COFD[ 618] = -0.3750329779403845E+00; COFD[ 619] = 0.1635855421757920E-01; COFD[ 620] = -0.1615768146554276E+02; COFD[ 621] = 0.3734150883385773E+01; COFD[ 622] = -0.2760989842105895E+00; COFD[ 623] = 0.1221438663500826E-01; COFD[ 624] = -0.1524511379647520E+02; COFD[ 625] = 0.3553053597674808E+01; COFD[ 626] = -0.2545212965294182E+00; COFD[ 627] = 0.1136271070888101E-01; COFD[ 628] = 
-0.1935427072301072E+02; COFD[ 629] = 0.4779618521455131E+01; COFD[ 630] = -0.3741503724582112E+00; COFD[ 631] = 0.1493019768764606E-01; COFD[ 632] = -0.1616672146196606E+02; COFD[ 633] = 0.3734919571093601E+01; COFD[ 634] = -0.2762018861637152E+00; COFD[ 635] = 0.1221896274296108E-01; COFD[ 636] = -0.1617584857259722E+02; COFD[ 637] = 0.3735860038531880E+01; COFD[ 638] = -0.2763277124196010E+00; COFD[ 639] = 0.1222455543181157E-01; COFD[ 640] = -0.1695356550477676E+02; COFD[ 641] = 0.4068213336589554E+01; COFD[ 642] = -0.3177710408064736E+00; COFD[ 643] = 0.1394701157712223E-01; COFD[ 644] = -0.1690087953694522E+02; COFD[ 645] = 0.4046945935955795E+01; COFD[ 646] = -0.3151742473102278E+00; COFD[ 647] = 0.1384162361172659E-01; COFD[ 648] = -0.1594065528219861E+02; COFD[ 649] = 0.3646923650815759E+01; COFD[ 650] = -0.2651273478800260E+00; COFD[ 651] = 0.1175431470196917E-01; COFD[ 652] = -0.1856069485825941E+02; COFD[ 653] = 0.4522343094041945E+01; COFD[ 654] = -0.3705240910138461E+00; COFD[ 655] = 0.1597653817363581E-01; COFD[ 656] = -0.1966898676800112E+02; COFD[ 657] = 0.4852357825322780E+01; COFD[ 658] = -0.3976302929205434E+00; COFD[ 659] = 0.1651454081234691E-01; COFD[ 660] = -0.1805980237913599E+02; COFD[ 661] = 0.4360474000056099E+01; COFD[ 662] = -0.3516356776422001E+00; COFD[ 663] = 0.1524545596600636E-01; COFD[ 664] = -0.1878468158135861E+02; COFD[ 665] = 0.4605034135660564E+01; COFD[ 666] = -0.3792399254380860E+00; COFD[ 667] = 0.1627418668274144E-01; COFD[ 668] = -0.1869955819260176E+02; COFD[ 669] = 0.4551222122230642E+01; COFD[ 670] = -0.3741499971521470E+00; COFD[ 671] = 0.1613010818331805E-01; COFD[ 672] = -0.1934447945708759E+02; COFD[ 673] = 0.4835867901258055E+01; COFD[ 674] = -0.3919040198993948E+00; COFD[ 675] = 0.1611935633959806E-01; COFD[ 676] = -0.1499765769172950E+02; COFD[ 677] = 0.3419142531858579E+01; COFD[ 678] = -0.2369480887688776E+00; COFD[ 679] = 0.1059345659536257E-01; COFD[ 680] = -0.1558361674716745E+02; COFD[ 681] = 
0.3987098568284434E+01; COFD[ 682] = -0.3109866036899985E+00; COFD[ 683] = 0.1380795341338578E-01; COFD[ 684] = -0.1664237108262573E+02; COFD[ 685] = 0.4321207774376617E+01; COFD[ 686] = -0.3181315549506706E+00; COFD[ 687] = 0.1258308183870717E-01; COFD[ 688] = -0.1871735010457793E+02; COFD[ 689] = 0.4573421885700214E+01; COFD[ 690] = -0.3757214086933051E+00; COFD[ 691] = 0.1614451502251515E-01; COFD[ 692] = -0.1768659705517047E+02; COFD[ 693] = 0.4367905899188474E+01; COFD[ 694] = -0.3516645879639785E+00; COFD[ 695] = 0.1520186726409712E-01; COFD[ 696] = -0.1719414340129819E+02; COFD[ 697] = 0.3490298819499314E+01; COFD[ 698] = -0.1625329575445097E+00; COFD[ 699] = 0.4043757780510787E-02; COFD[ 700] = -0.1872670165824808E+02; COFD[ 701] = 0.4573903479896379E+01; COFD[ 702] = -0.3758340786206700E+00; COFD[ 703] = 0.1615161324401359E-01; COFD[ 704] = -0.1873560076291526E+02; COFD[ 705] = 0.4574325021189105E+01; COFD[ 706] = -0.3759338652876284E+00; COFD[ 707] = 0.1615792922437573E-01; COFD[ 708] = -0.1913993542632300E+02; COFD[ 709] = 0.4730620081114692E+01; COFD[ 710] = -0.3883890489649657E+00; COFD[ 711] = 0.1637150502726411E-01; COFD[ 712] = -0.1913011857829392E+02; COFD[ 713] = 0.4728539523227080E+01; COFD[ 714] = -0.3887605628634140E+00; COFD[ 715] = 0.1641451966125198E-01; COFD[ 716] = -0.1856069485825941E+02; COFD[ 717] = 0.4522343094041945E+01; COFD[ 718] = -0.3705240910138461E+00; COFD[ 719] = 0.1597653817363581E-01; COFD[ 720] = -0.2016579860319706E+02; COFD[ 721] = 0.4877618917687775E+01; COFD[ 722] = -0.3948460761426021E+00; COFD[ 723] = 0.1615166256107893E-01; COFD[ 724] = -0.1928397124960116E+02; COFD[ 725] = 0.4312110001899904E+01; COFD[ 726] = -0.2907322088872635E+00; COFD[ 727] = 0.1043236734543387E-01; COFD[ 728] = -0.1989540745008587E+02; COFD[ 729] = 0.4834853238941779E+01; COFD[ 730] = -0.3932322467650001E+00; COFD[ 731] = 0.1623552876595314E-01; COFD[ 732] = -0.2005638433296784E+02; COFD[ 733] = 0.4810333449107644E+01; COFD[ 734] = 
-0.3802752927546432E+00; COFD[ 735] = 0.1528555688791561E-01; COFD[ 736] = -0.2016849050066446E+02; COFD[ 737] = 0.4851147993946014E+01; COFD[ 738] = -0.3897545968990269E+00; COFD[ 739] = 0.1586327845280228E-01; COFD[ 740] = -0.1833717188007337E+02; COFD[ 741] = 0.4039707350126803E+01; COFD[ 742] = -0.2488565303881858E+00; COFD[ 743] = 0.8361653792794556E-02; COFD[ 744] = -0.1761289252565630E+02; COFD[ 745] = 0.4327706010171974E+01; COFD[ 746] = -0.3490300604788930E+00; COFD[ 747] = 0.1519713199892770E-01; COFD[ 748] = -0.1688564690991911E+02; COFD[ 749] = 0.4415865473479337E+01; COFD[ 750] = -0.3564104485793251E+00; COFD[ 751] = 0.1533855847922275E-01; COFD[ 752] = -0.1567567878102999E+02; COFD[ 753] = 0.3706302510623138E+01; COFD[ 754] = -0.2096236244277931E+00; COFD[ 755] = 0.6748353057110522E-02; COFD[ 756] = -0.1977364410044468E+02; COFD[ 757] = 0.4876531592391508E+01; COFD[ 758] = -0.3981741984755758E+00; COFD[ 759] = 0.1643476290195535E-01; COFD[ 760] = -0.1891518602118661E+02; COFD[ 761] = 0.4756739681751712E+01; COFD[ 762] = -0.3886685070061102E+00; COFD[ 763] = 0.1625748441332711E-01; COFD[ 764] = -0.1468058225757294E+02; COFD[ 765] = 0.2298576535112391E+01; COFD[ 766] = 0.1810975986268222E-01; COFD[ 767] = -0.4731609614535192E-02; COFD[ 768] = -0.1978049689264223E+02; COFD[ 769] = 0.4876281709421428E+01; COFD[ 770] = -0.3981401304053240E+00; COFD[ 771] = 0.1643325343977014E-01; COFD[ 772] = -0.1978644159772173E+02; COFD[ 773] = 0.4875758120324897E+01; COFD[ 774] = -0.3980631994813040E+00; COFD[ 775] = 0.1642957450530744E-01; COFD[ 776] = -0.1958734834972716E+02; COFD[ 777] = 0.4743227463757541E+01; COFD[ 778] = -0.3686474607497347E+00; COFD[ 779] = 0.1466232449273593E-01; COFD[ 780] = -0.1962180466510501E+02; COFD[ 781] = 0.4762356031601866E+01; COFD[ 782] = -0.3720986023672735E+00; COFD[ 783] = 0.1485121157223651E-01; COFD[ 784] = -0.1966898676800112E+02; COFD[ 785] = 0.4852357825322780E+01; COFD[ 786] = -0.3976302929205434E+00; COFD[ 787] = 
0.1651454081234691E-01; COFD[ 788] = -0.1928397124960116E+02; COFD[ 789] = 0.4312110001899904E+01; COFD[ 790] = -0.2907322088872635E+00; COFD[ 791] = 0.1043236734543387E-01; COFD[ 792] = -0.1603997666374001E+02; COFD[ 793] = 0.2734208547161103E+01; COFD[ 794] = -0.4628114204507498E-01; COFD[ 795] = -0.1672840630334733E-02; COFD[ 796] = -0.1961392673202087E+02; COFD[ 797] = 0.4524886156774725E+01; COFD[ 798] = -0.3261423023221592E+00; COFD[ 799] = 0.1226835649915157E-01; COFD[ 800] = -0.1890313414494545E+02; COFD[ 801] = 0.4121436960338011E+01; COFD[ 802] = -0.2594362965642011E+00; COFD[ 803] = 0.8823603190561257E-02; COFD[ 804] = -0.1932385614900669E+02; COFD[ 805] = 0.4295315571390907E+01; COFD[ 806] = -0.2874898966822066E+00; COFD[ 807] = 0.1025198769300411E-01; COFD[ 808] = -0.1580770444028599E+02; COFD[ 809] = 0.2799524263213967E+01; COFD[ 810] = -0.5666575630034967E-01; COFD[ 811] = -0.1137221314445150E-02; COFD[ 812] = -0.1888662285046225E+02; COFD[ 813] = 0.4736242637853255E+01; COFD[ 814] = -0.3893357605389257E+00; COFD[ 815] = 0.1642422988902124E-01; COFD[ 816] = -0.1548207029093904E+02; COFD[ 817] = 0.3939084188623417E+01; COFD[ 818] = -0.3072761108436687E+00; COFD[ 819] = 0.1375770270895890E-01; COFD[ 820] = -0.1685361380845761E+02; COFD[ 821] = 0.4411251776453823E+01; COFD[ 822] = -0.3350756131548513E+00; COFD[ 823] = 0.1353157937321782E-01; COFD[ 824] = -0.1830650296367939E+02; COFD[ 825] = 0.4452394507404697E+01; COFD[ 826] = -0.3624991200767191E+00; COFD[ 827] = 0.1567286266089603E-01; COFD[ 828] = -0.1730907989620245E+02; COFD[ 829] = 0.4243749180870270E+01; COFD[ 830] = -0.3387466685616417E+00; COFD[ 831] = 0.1477905643158847E-01; COFD[ 832] = -0.1868831150678477E+02; COFD[ 833] = 0.4247585976166683E+01; COFD[ 834] = -0.2815735628073077E+00; COFD[ 835] = 0.1000602366593966E-01; COFD[ 836] = -0.1831322697803230E+02; COFD[ 837] = 0.4452261161272578E+01; COFD[ 838] = -0.3624715246876605E+00; COFD[ 839] = 0.1567121551911240E-01; COFD[ 840] = 
-0.1831968967523493E+02; COFD[ 841] = 0.4452134774757628E+01; COFD[ 842] = -0.3624420830935042E+00; COFD[ 843] = 0.1566936578675393E-01; COFD[ 844] = -0.1892132288088230E+02; COFD[ 845] = 0.4686257416133524E+01; COFD[ 846] = -0.3876178620425971E+00; COFD[ 847] = 0.1654627973246352E-01; COFD[ 848] = -0.1887963312848053E+02; COFD[ 849] = 0.4671422222663214E+01; COFD[ 850] = -0.3861362810478654E+00; COFD[ 851] = 0.1650068107616750E-01; COFD[ 852] = -0.1805980237913599E+02; COFD[ 853] = 0.4360474000056099E+01; COFD[ 854] = -0.3516356776422001E+00; COFD[ 855] = 0.1524545596600636E-01; COFD[ 856] = -0.1989540745008587E+02; COFD[ 857] = 0.4834853238941779E+01; COFD[ 858] = -0.3932322467650001E+00; COFD[ 859] = 0.1623552876595314E-01; COFD[ 860] = -0.1961392673202087E+02; COFD[ 861] = 0.4524886156774725E+01; COFD[ 862] = -0.3261423023221592E+00; COFD[ 863] = 0.1226835649915157E-01; COFD[ 864] = -0.1977017209130380E+02; COFD[ 865] = 0.4847119688172202E+01; COFD[ 866] = -0.4003461404722360E+00; COFD[ 867] = 0.1676917774316241E-01; COFD[ 868] = -0.2005922850303636E+02; COFD[ 869] = 0.4881576631488546E+01; COFD[ 870] = -0.3958945479488925E+00; COFD[ 871] = 0.1621878544379618E-01; COFD[ 872] = -0.2004561228963652E+02; COFD[ 873] = 0.4868786155445560E+01; COFD[ 874] = -0.3974311326078929E+00; COFD[ 875] = 0.1641238256860971E-01; COFD[ 876] = -0.1912297200146966E+02; COFD[ 877] = 0.4463559909783323E+01; COFD[ 878] = -0.3178210889061098E+00; COFD[ 879] = 0.1189219041826154E-01; COFD[ 880] = -0.1703791763653080E+02; COFD[ 881] = 0.4110925857281930E+01; COFD[ 882] = -0.3221728744644342E+00; COFD[ 883] = 0.1408821528952375E-01; COFD[ 884] = -0.1606152931877502E+02; COFD[ 885] = 0.4146430359164761E+01; COFD[ 886] = -0.3312210923439469E+00; COFD[ 887] = 0.1466430884967795E-01; COFD[ 888] = -0.1635823810097287E+02; COFD[ 889] = 0.4123977007004703E+01; COFD[ 890] = -0.2854728908229589E+00; COFD[ 891] = 0.1088433914065527E-01; COFD[ 892] = -0.1904759289384530E+02; COFD[ 893] = 
0.4700929573683383E+01; COFD[ 894] = -0.3903608191209737E+00; COFD[ 895] = 0.1670229359855150E-01; COFD[ 896] = -0.1795985745949001E+02; COFD[ 897] = 0.4461091096847464E+01; COFD[ 898] = -0.3624212969270659E+00; COFD[ 899] = 0.1561553586311572E-01; COFD[ 900] = -0.1767910948230725E+02; COFD[ 901] = 0.3722086580781685E+01; COFD[ 902] = -0.1980254146986777E+00; COFD[ 903] = 0.5799038224028695E-02; COFD[ 904] = -0.1905419842832635E+02; COFD[ 905] = 0.4700662819460902E+01; COFD[ 906] = -0.3903206776226419E+00; COFD[ 907] = 0.1670032573134212E-01; COFD[ 908] = -0.1906036212420135E+02; COFD[ 909] = 0.4700323443044125E+01; COFD[ 910] = -0.3902665913786998E+00; COFD[ 911] = 0.1669755465043006E-01; COFD[ 912] = -0.1934401530563503E+02; COFD[ 913] = 0.4788242539311199E+01; COFD[ 914] = -0.3933254249328604E+00; COFD[ 915] = 0.1648505821153991E-01; COFD[ 916] = -0.1933004429336219E+02; COFD[ 917] = 0.4785693509382203E+01; COFD[ 918] = -0.3936420075604487E+00; COFD[ 919] = 0.1652573779841039E-01; COFD[ 920] = -0.1878468158135861E+02; COFD[ 921] = 0.4605034135660564E+01; COFD[ 922] = -0.3792399254380860E+00; COFD[ 923] = 0.1627418668274144E-01; COFD[ 924] = -0.2005638433296784E+02; COFD[ 925] = 0.4810333449107644E+01; COFD[ 926] = -0.3802752927546432E+00; COFD[ 927] = 0.1528555688791561E-01; COFD[ 928] = -0.1890313414494545E+02; COFD[ 929] = 0.4121436960338011E+01; COFD[ 930] = -0.2594362965642011E+00; COFD[ 931] = 0.8823603190561257E-02; COFD[ 932] = -0.2005922850303636E+02; COFD[ 933] = 0.4881576631488546E+01; COFD[ 934] = -0.3958945479488925E+00; COFD[ 935] = 0.1621878544379618E-01; COFD[ 936] = -0.1999234499569811E+02; COFD[ 937] = 0.4757309526477115E+01; COFD[ 938] = -0.3684627400545891E+00; COFD[ 939] = 0.1457942336558470E-01; COFD[ 940] = -0.2015885283044970E+02; COFD[ 941] = 0.4822728985557825E+01; COFD[ 942] = -0.3812000138893480E+00; COFD[ 943] = 0.1529926403243838E-01; COFD[ 944] = -0.1846533503760062E+02; COFD[ 945] = 0.4088215083590887E+01; COFD[ 946] = 
-0.2553371937407484E+00; COFD[ 947] = 0.8657586372083866E-02; COFD[ 948] = -0.1776457707271082E+02; COFD[ 949] = 0.4365715498242292E+01; COFD[ 950] = -0.3517547303197583E+00; COFD[ 951] = 0.1522394939239619E-01; COFD[ 952] = -0.1596236229610858E+02; COFD[ 953] = 0.4079842750882008E+01; COFD[ 954] = -0.3231960602071978E+00; COFD[ 955] = 0.1434305777680907E-01; COFD[ 956] = -0.1646195534760534E+02; COFD[ 957] = 0.4141822899037704E+01; COFD[ 958] = -0.2892308013569180E+00; COFD[ 959] = 0.1109614462742981E-01; COFD[ 960] = -0.1883799365221673E+02; COFD[ 961] = 0.4594611640098751E+01; COFD[ 962] = -0.3781168670160998E+00; COFD[ 963] = 0.1623467419613562E-01; COFD[ 964] = -0.1782798069130013E+02; COFD[ 965] = 0.4381516462622495E+01; COFD[ 966] = -0.3532970918753254E+00; COFD[ 967] = 0.1526804966457564E-01; COFD[ 968] = -0.1813805288510309E+02; COFD[ 969] = 0.3907124675584541E+01; COFD[ 970] = -0.2273991439446390E+00; COFD[ 971] = 0.7280601459305962E-02; COFD[ 972] = -0.1884529530697174E+02; COFD[ 973] = 0.4594634055023398E+01; COFD[ 974] = -0.3781333057516071E+00; COFD[ 975] = 0.1623599168682821E-01; COFD[ 976] = -0.1885218260686505E+02; COFD[ 977] = 0.4594596529989258E+01; COFD[ 978] = -0.3781371531513473E+00; COFD[ 979] = 0.1623655159949430E-01; COFD[ 980] = -0.1926514266945471E+02; COFD[ 981] = 0.4743728688652060E+01; COFD[ 982] = -0.3899270700585402E+00; COFD[ 983] = 0.1643466452124135E-01; COFD[ 984] = -0.1926485085357470E+02; COFD[ 985] = 0.4746666060969545E+01; COFD[ 986] = -0.3910387566978661E+00; COFD[ 987] = 0.1651352760050653E-01; COFD[ 988] = -0.1869955819260176E+02; COFD[ 989] = 0.4551222122230642E+01; COFD[ 990] = -0.3741499971521470E+00; COFD[ 991] = 0.1613010818331805E-01; COFD[ 992] = -0.2016849050066446E+02; COFD[ 993] = 0.4851147993946014E+01; COFD[ 994] = -0.3897545968990269E+00; COFD[ 995] = 0.1586327845280228E-01; COFD[ 996] = -0.1932385614900669E+02; COFD[ 997] = 0.4295315571390907E+01; COFD[ 998] = -0.2874898966822066E+00; COFD[ 999] = 
0.1025198769300411E-01; COFD[ 1000] = -0.2004561228963652E+02; COFD[ 1001] = 0.4868786155445560E+01; COFD[ 1002] = -0.3974311326078929E+00; COFD[ 1003] = 0.1641238256860971E-01; COFD[ 1004] = -0.2015885283044970E+02; COFD[ 1005] = 0.4822728985557825E+01; COFD[ 1006] = -0.3812000138893480E+00; COFD[ 1007] = 0.1529926403243838E-01; COFD[ 1008] = -0.2024931478123324E+02; COFD[ 1009] = 0.4856999247129295E+01; COFD[ 1010] = -0.3896182557523967E+00; COFD[ 1011] = 0.1582188160996442E-01; COFD[ 1012] = -0.1871694128239326E+02; COFD[ 1013] = 0.4183824876778368E+01; COFD[ 1014] = -0.2717303313431965E+00; COFD[ 1015] = 0.9517448825356330E-02; COFD[ 1016] = -0.1776451185181849E+02; COFD[ 1017] = 0.4347765208885912E+01; COFD[ 1018] = -0.3515930667599710E+00; COFD[ 1019] = 0.1530764063925441E-01; COFD[ 1020] = -0.1682806034335371E+02; COFD[ 1021] = 0.4485450500929320E+01; COFD[ 1022] = -0.3655695289846634E+00; COFD[ 1023] = 0.1574614737371121E-01; COFD[ 1024] = -0.1626359848098758E+02; COFD[ 1025] = 0.4116788683843708E+01; COFD[ 1026] = -0.2733289330957227E+00; COFD[ 1027] = 0.9943035956817482E-02; COFD[ 1028] = -0.1934710272078301E+02; COFD[ 1029] = 0.4822779400529180E+01; COFD[ 1030] = -0.3871228449439424E+00; COFD[ 1031] = 0.1578686341252481E-01; COFD[ 1032] = -0.1863707722655403E+02; COFD[ 1033] = 0.4806992114833483E+01; COFD[ 1034] = -0.3984196894018535E+00; COFD[ 1035] = 0.1681856268295304E-01; COFD[ 1036] = -0.1316791411191276E+02; COFD[ 1037] = 0.1635596276910710E+01; COFD[ 1038] = 0.1248562922809231E+00; COFD[ 1039] = -0.1002143991895203E-01; COFD[ 1040] = -0.1919185322703664E+02; COFD[ 1041] = 0.4784033288807873E+01; COFD[ 1042] = -0.3849459064100618E+00; COFD[ 1043] = 0.1580648990064550E-01; COFD[ 1044] = -0.1918517200292939E+02; COFD[ 1045] = 0.4778400021399422E+01; COFD[ 1046] = -0.3840658172050402E+00; COFD[ 1047] = 0.1576203256152660E-01; COFD[ 1048] = -0.1944043258564383E+02; COFD[ 1049] = 0.4828743160693618E+01; COFD[ 1050] = -0.3828021863328961E+00; COFD[ 1051] 
= 0.1540186987470838E-01; COFD[ 1052] = -0.1941730561245821E+02; COFD[ 1053] = 0.4788578363438019E+01; COFD[ 1054] = -0.3737994268951498E+00; COFD[ 1055] = 0.1486050997611076E-01; COFD[ 1056] = -0.1934447945708759E+02; COFD[ 1057] = 0.4835867901258055E+01; COFD[ 1058] = -0.3919040198993948E+00; COFD[ 1059] = 0.1611935633959806E-01; COFD[ 1060] = -0.1833717188007337E+02; COFD[ 1061] = 0.4039707350126803E+01; COFD[ 1062] = -0.2488565303881858E+00; COFD[ 1063] = 0.8361653792794556E-02; COFD[ 1064] = -0.1580770444028599E+02; COFD[ 1065] = 0.2799524263213967E+01; COFD[ 1066] = -0.5666575630034967E-01; COFD[ 1067] = -0.1137221314445150E-02; COFD[ 1068] = -0.1912297200146966E+02; COFD[ 1069] = 0.4463559909783323E+01; COFD[ 1070] = -0.3178210889061098E+00; COFD[ 1071] = 0.1189219041826154E-01; COFD[ 1072] = -0.1846533503760062E+02; COFD[ 1073] = 0.4088215083590887E+01; COFD[ 1074] = -0.2553371937407484E+00; COFD[ 1075] = 0.8657586372083866E-02; COFD[ 1076] = -0.1871694128239326E+02; COFD[ 1077] = 0.4183824876778368E+01; COFD[ 1078] = -0.2717303313431965E+00; COFD[ 1079] = 0.9517448825356330E-02; COFD[ 1080] = -0.1451532645593394E+02; COFD[ 1081] = 0.2310965406901932E+01; COFD[ 1082] = 0.1872493495986990E-01; COFD[ 1083] = -0.4810190587113683E-02; COFD[ 1084] = -0.1862683104563700E+02; COFD[ 1085] = 0.4778876685436832E+01; COFD[ 1086] = -0.3977561689282376E+00; COFD[ 1087] = 0.1691190324024170E-01; COFD[ 1088] = -0.1163276178298392E+02; COFD[ 1089] = 0.2530205071288130E+01; COFD[ 1090] = -0.1183776350934922E+00; COFD[ 1091] = 0.5312988352557955E-02; COFD[ 1092] = -0.1522605770447405E+02; COFD[ 1093] = 0.4154863253653862E+01; COFD[ 1094] = -0.3321404775716639E+00; COFD[ 1095] = 0.1469592836326692E-01; COFD[ 1096] = -0.1527049784849730E+02; COFD[ 1097] = 0.3538365317994180E+01; COFD[ 1098] = -0.2526814234121028E+00; COFD[ 1099] = 0.1128573168209360E-01; COFD[ 1100] = -0.1395220381010925E+02; COFD[ 1101] = 0.3159688299904867E+01; COFD[ 1102] = -0.2028455188834762E+00; COFD[ 
1103] = 0.9099190221367019E-02; COFD[ 1104] = -0.1875637020961290E+02; COFD[ 1105] = 0.4821872691746870E+01; COFD[ 1106] = -0.3987313751514457E+00; COFD[ 1107] = 0.1676685214877852E-01; COFD[ 1108] = -0.1528586419146581E+02; COFD[ 1109] = 0.3542872339374690E+01; COFD[ 1110] = -0.2532972672017028E+00; COFD[ 1111] = 0.1131367837559019E-01; COFD[ 1112] = -0.1530075071186979E+02; COFD[ 1113] = 0.3547276678457322E+01; COFD[ 1114] = -0.2538990508478524E+00; COFD[ 1115] = 0.1134098602286849E-01; COFD[ 1116] = -0.1573476318984406E+02; COFD[ 1117] = 0.3712839423735899E+01; COFD[ 1118] = -0.2734220687823376E+00; COFD[ 1119] = 0.1210212757075583E-01; COFD[ 1120] = -0.1569001110440693E+02; COFD[ 1121] = 0.3696584277188425E+01; COFD[ 1122] = -0.2713867054547939E+00; COFD[ 1123] = 0.1201712146112774E-01; COFD[ 1124] = -0.1499765769172950E+02; COFD[ 1125] = 0.3419142531858579E+01; COFD[ 1126] = -0.2369480887688776E+00; COFD[ 1127] = 0.1059345659536257E-01; COFD[ 1128] = -0.1761289252565630E+02; COFD[ 1129] = 0.4327706010171974E+01; COFD[ 1130] = -0.3490300604788930E+00; COFD[ 1131] = 0.1519713199892770E-01; COFD[ 1132] = -0.1888662285046225E+02; COFD[ 1133] = 0.4736242637853255E+01; COFD[ 1134] = -0.3893357605389257E+00; COFD[ 1135] = 0.1642422988902124E-01; COFD[ 1136] = -0.1703791763653080E+02; COFD[ 1137] = 0.4110925857281930E+01; COFD[ 1138] = -0.3221728744644342E+00; COFD[ 1139] = 0.1408821528952375E-01; COFD[ 1140] = -0.1776457707271082E+02; COFD[ 1141] = 0.4365715498242292E+01; COFD[ 1142] = -0.3517547303197583E+00; COFD[ 1143] = 0.1522394939239619E-01; COFD[ 1144] = -0.1776451185181849E+02; COFD[ 1145] = 0.4347765208885912E+01; COFD[ 1146] = -0.3515930667599710E+00; COFD[ 1147] = 0.1530764063925441E-01; COFD[ 1148] = -0.1862683104563700E+02; COFD[ 1149] = 0.4778876685436832E+01; COFD[ 1150] = -0.3977561689282376E+00; COFD[ 1151] = 0.1691190324024170E-01; COFD[ 1152] = -0.1377266831742670E+02; COFD[ 1153] = 0.3040578783767133E+01; COFD[ 1154] = -0.1869754178307637E+00; 
COFD[ 1155] = 0.8394057441551481E-02; }; #if defined(BL_FORT_USE_UPPERCASE) #define egtransetKTDIF EGTRANSETKTDIF #elif defined(BL_FORT_USE_LOWERCASE) #define egtransetKTDIF egtransetktdif #elif defined(BL_FORT_USE_UNDERSCORE) #define egtransetKTDIF egtransetktdif_ #endif void egtransetKTDIF(int* KTDIF) { KTDIF[ 0] = 1; KTDIF[ 1] = 2; }; #if defined(BL_FORT_USE_UPPERCASE) #define egtransetCOFTD EGTRANSETCOFTD #elif defined(BL_FORT_USE_LOWERCASE) #define egtransetCOFTD egtransetcoftd #elif defined(BL_FORT_USE_UNDERSCORE) #define egtransetCOFTD egtransetcoftd_ #endif void egtransetCOFTD(double* COFTD) { COFTD[ 0] = 0.0000000000000000E+00; COFTD[ 1] = 0.0000000000000000E+00; COFTD[ 2] = 0.0000000000000000E+00; COFTD[ 3] = 0.0000000000000000E+00; COFTD[ 4] = -0.1267157358260665E+00; COFTD[ 5] = -0.1025304929454753E-03; COFTD[ 6] = 0.5456049480958656E-07; COFTD[ 7] = -0.8851811492383965E-11; COFTD[ 8] = 0.3818642928024142E+00; COFTD[ 9] = 0.1841173676342166E-03; COFTD[ 10] = -0.9796176034833310E-07; COFTD[ 11] = 0.1625422476308086E-10; COFTD[ 12] = 0.3754754585061315E+00; COFTD[ 13] = 0.9735779997691615E-04; COFTD[ 14] = -0.4943935849608963E-07; COFTD[ 15] = 0.8333052020411856E-11; COFTD[ 16] = 0.1951583360602999E-02; COFTD[ 17] = 0.6694708047799631E-03; COFTD[ 18] = -0.3121487146752215E-06; COFTD[ 19] = 0.4529499206739157E-10; COFTD[ 20] = 0.3833421804573635E+00; COFTD[ 21] = 0.1848299369679226E-03; COFTD[ 22] = -0.9834089104739220E-07; COFTD[ 23] = 0.1631713171345801E-10; COFTD[ 24] = 0.3847373793384187E+00; COFTD[ 25] = 0.1855026375847330E-03; COFTD[ 26] = -0.9869880913766751E-07; COFTD[ 27] = 0.1637651897911576E-10; COFTD[ 28] = 0.2912627324548178E+00; COFTD[ 29] = 0.2330450691123891E-03; COFTD[ 30] = -0.1240401108265922E-06; COFTD[ 31] = 0.2013458793291140E-10; COFTD[ 32] = 0.2989740640904121E+00; COFTD[ 33] = 0.2322310123534718E-03; COFTD[ 34] = -0.1236750374915925E-06; COFTD[ 35] = 0.2010297352068701E-10; COFTD[ 36] = 0.3874054384854359E+00; COFTD[ 37] = 
0.1568838091395251E-03; COFTD[ 38] = -0.8293099098207665E-07; COFTD[ 39] = 0.1384603182952203E-10; COFTD[ 40] = 0.2471291249759435E+00; COFTD[ 41] = 0.4493957116165469E-03; COFTD[ 42] = -0.2320307603798581E-06; COFTD[ 43] = 0.3625788270902565E-10; COFTD[ 44] = 0.8921833323260459E-01; COFTD[ 45] = 0.6385973457264696E-03; COFTD[ 46] = -0.3105267185106736E-06; COFTD[ 47] = 0.4632182714912095E-10; COFTD[ 48] = 0.2613831106363483E+00; COFTD[ 49] = 0.3741001421151409E-03; COFTD[ 50] = -0.1952756438202893E-06; COFTD[ 51] = 0.3084446425883240E-10; COFTD[ 52] = 0.2066211976818736E+00; COFTD[ 53] = 0.4698391340071242E-03; COFTD[ 54] = -0.2399965709388047E-06; COFTD[ 55] = 0.3714866667604425E-10; COFTD[ 56] = 0.2301782357423144E+00; COFTD[ 57] = 0.4411239723719602E-03; COFTD[ 58] = -0.2271927867378409E-06; COFTD[ 59] = 0.3542100728590062E-10; COFTD[ 60] = 0.5978875014515522E-01; COFTD[ 61] = 0.6004068868355234E-03; COFTD[ 62] = -0.2889528616835724E-06; COFTD[ 63] = 0.4280270387409600E-10; COFTD[ 64] = 0.3671545750621259E+00; COFTD[ 65] = 0.7074871807561138E-04; COFTD[ 66] = -0.3401390884461344E-07; COFTD[ 67] = 0.5718817519581169E-11; COFTD[ 68] = 0.1267157358260665E+00; COFTD[ 69] = 0.1025304929454753E-03; COFTD[ 70] = -0.5456049480958656E-07; COFTD[ 71] = 0.8851811492383965E-11; COFTD[ 72] = 0.0000000000000000E+00; COFTD[ 73] = 0.0000000000000000E+00; COFTD[ 74] = 0.0000000000000000E+00; COFTD[ 75] = 0.0000000000000000E+00; COFTD[ 76] = 0.1397457852745939E+00; COFTD[ 77] = 0.6298108591719154E-03; COFTD[ 78] = -0.3116940342118423E-06; COFTD[ 79] = 0.4707558637232602E-10; COFTD[ 80] = 0.1945605631423317E+00; COFTD[ 81] = 0.5079167441644920E-03; COFTD[ 82] = -0.2577206587518775E-06; COFTD[ 83] = 0.3967209139227169E-10; COFTD[ 84] = -0.1609284370381552E+00; COFTD[ 85] = 0.8016856147508327E-03; COFTD[ 86] = -0.3249766388129991E-06; COFTD[ 87] = 0.4319581912733095E-10; COFTD[ 88] = 0.1400151641499396E+00; COFTD[ 89] = 0.6310249046659604E-03; COFTD[ 90] = -0.3122948665605463E-06; 
COFTD[ 91] = 0.4716633092314242E-10; COFTD[ 92] = 0.1402690373253105E+00; COFTD[ 93] = 0.6321690685660495E-03; COFTD[ 94] = -0.3128611144373887E-06; COFTD[ 95] = 0.4725185213275378E-10; COFTD[ 96] = 0.6875565922217064E-01; COFTD[ 97] = 0.6631049392258758E-03; COFTD[ 98] = -0.3194849555055448E-06; COFTD[ 99] = 0.4736101303399774E-10; COFTD[ 100] = 0.7315173371679451E-01; COFTD[ 101] = 0.6642955892285024E-03; COFTD[ 102] = -0.3206112835377731E-06; COFTD[ 103] = 0.4758315870108564E-10; COFTD[ 104] = 0.1587269043383360E+00; COFTD[ 105] = 0.5967534238686169E-03; COFTD[ 106] = -0.2976737939540804E-06; COFTD[ 107] = 0.4521938974583591E-10; COFTD[ 108] = -0.3849651678153934E-01; COFTD[ 109] = 0.8347949814005065E-03; COFTD[ 110] = -0.3810312564452182E-06; COFTD[ 111] = 0.5455319374182929E-10; COFTD[ 112] = -0.1529552889766811E+00; COFTD[ 113] = 0.8501473210897147E-03; COFTD[ 114] = -0.3529415371202759E-06; COFTD[ 115] = 0.4763453033863902E-10; COFTD[ 116] = -0.6479936638152356E-02; COFTD[ 117] = 0.7836066032604117E-03; COFTD[ 118] = -0.3637347512791469E-06; COFTD[ 119] = 0.5263072586686018E-10; COFTD[ 120] = -0.6410089132874891E-01; COFTD[ 121] = 0.8311312698059269E-03; COFTD[ 122] = -0.3732877529348841E-06; COFTD[ 123] = 0.5291055177769992E-10; COFTD[ 124] = -0.4419302611803710E-01; COFTD[ 125] = 0.8219675155377782E-03; COFTD[ 126] = -0.3737680415530062E-06; COFTD[ 127] = 0.5338781013168227E-10; COFTD[ 128] = -0.1412627145163882E+00; COFTD[ 129] = 0.8094401725653276E-03; COFTD[ 130] = -0.3379215246058519E-06; COFTD[ 131] = 0.4576856099561897E-10; COFTD[ 132] = 0.2126392909101736E+00; COFTD[ 133] = 0.4604824955661828E-03; COFTD[ 134] = -0.2357734960762928E-06; COFTD[ 135] = 0.3656872132522026E-10; }; #if 0 \\ \\ \\ This is the mechanism file \\ \\ ELEMENTS O H C N END SPECIES H2 H O2 OH H2O HO2 H2O2 CH3 CH4 CO CO2 CH2O C2H2 C2H4 C2H6 NH3 NO HCN N2 END REACTIONS END \\ \\ \\ This is the therm file \\ \\ THERMO 300.000 1000.000 5000.000 ! 
GRI-Mech Version 3.0 Thermodynamics released 7/30/99 ! NASA Polynomial format for CHEMKIN-II ! see README file for disclaimer O L 1/90O 1 G 200.000 3500.000 1000.000 1 2.56942078E+00-8.59741137E-05 4.19484589E-08-1.00177799E-11 1.22833691E-15 2 2.92175791E+04 4.78433864E+00 3.16826710E+00-3.27931884E-03 6.64306396E-06 3 -6.12806624E-09 2.11265971E-12 2.91222592E+04 2.05193346E+00 4 O2 TPIS89O 2 G 200.000 3500.000 1000.000 1 3.28253784E+00 1.48308754E-03-7.57966669E-07 2.09470555E-10-2.16717794E-14 2 -1.08845772E+03 5.45323129E+00 3.78245636E+00-2.99673416E-03 9.84730201E-06 3 -9.68129509E-09 3.24372837E-12-1.06394356E+03 3.65767573E+00 4 H L 7/88H 1 G 200.000 3500.000 1000.000 1 2.50000001E+00-2.30842973E-11 1.61561948E-14-4.73515235E-18 4.98197357E-22 2 2.54736599E+04-4.46682914E-01 2.50000000E+00 7.05332819E-13-1.99591964E-15 3 2.30081632E-18-9.27732332E-22 2.54736599E+04-4.46682853E-01 4 H2 TPIS78H 2 G 200.000 3500.000 1000.000 1 3.33727920E+00-4.94024731E-05 4.99456778E-07-1.79566394E-10 2.00255376E-14 2 -9.50158922E+02-3.20502331E+00 2.34433112E+00 7.98052075E-03-1.94781510E-05 3 2.01572094E-08-7.37611761E-12-9.17935173E+02 6.83010238E-01 4 OH RUS 78O 1H 1 G 200.000 3500.000 1000.000 1 3.09288767E+00 5.48429716E-04 1.26505228E-07-8.79461556E-11 1.17412376E-14 2 3.85865700E+03 4.47669610E+00 3.99201543E+00-2.40131752E-03 4.61793841E-06 3 -3.88113333E-09 1.36411470E-12 3.61508056E+03-1.03925458E-01 4 H2O L 8/89H 2O 1 G 200.000 3500.000 1000.000 1 3.03399249E+00 2.17691804E-03-1.64072518E-07-9.70419870E-11 1.68200992E-14 2 -3.00042971E+04 4.96677010E+00 4.19864056E+00-2.03643410E-03 6.52040211E-06 3 -5.48797062E-09 1.77197817E-12-3.02937267E+04-8.49032208E-01 4 HO2 L 5/89H 1O 2 G 200.000 3500.000 1000.000 1 4.01721090E+00 2.23982013E-03-6.33658150E-07 1.14246370E-10-1.07908535E-14 2 1.11856713E+02 3.78510215E+00 4.30179801E+00-4.74912051E-03 2.11582891E-05 3 -2.42763894E-08 9.29225124E-12 2.94808040E+02 3.71666245E+00 4 H2O2 L 7/88H 2O 2 G 200.000 3500.000 
1000.000 1 4.16500285E+00 4.90831694E-03-1.90139225E-06 3.71185986E-10-2.87908305E-14 2 -1.78617877E+04 2.91615662E+00 4.27611269E+00-5.42822417E-04 1.67335701E-05 3 -2.15770813E-08 8.62454363E-12-1.77025821E+04 3.43505074E+00 4 C L11/88C 1 G 200.000 3500.000 1000.000 1 2.49266888E+00 4.79889284E-05-7.24335020E-08 3.74291029E-11-4.87277893E-15 2 8.54512953E+04 4.80150373E+00 2.55423955E+00-3.21537724E-04 7.33792245E-07 3 -7.32234889E-10 2.66521446E-13 8.54438832E+04 4.53130848E+00 4 CH TPIS79C 1H 1 G 200.000 3500.000 1000.000 1 2.87846473E+00 9.70913681E-04 1.44445655E-07-1.30687849E-10 1.76079383E-14 2 7.10124364E+04 5.48497999E+00 3.48981665E+00 3.23835541E-04-1.68899065E-06 3 3.16217327E-09-1.40609067E-12 7.07972934E+04 2.08401108E+00 4 CH2 L S/93C 1H 2 G 200.000 3500.000 1000.000 1 2.87410113E+00 3.65639292E-03-1.40894597E-06 2.60179549E-10-1.87727567E-14 2 4.62636040E+04 6.17119324E+00 3.76267867E+00 9.68872143E-04 2.79489841E-06 3 -3.85091153E-09 1.68741719E-12 4.60040401E+04 1.56253185E+00 4 CH2(S) L S/93C 1H 2 G 200.000 3500.000 1000.000 1 2.29203842E+00 4.65588637E-03-2.01191947E-06 4.17906000E-10-3.39716365E-14 2 5.09259997E+04 8.62650169E+00 4.19860411E+00-2.36661419E-03 8.23296220E-06 3 -6.68815981E-09 1.94314737E-12 5.04968163E+04-7.69118967E-01 4 CH3 L11/89C 1H 3 G 200.000 3500.000 1000.000 1 2.28571772E+00 7.23990037E-03-2.98714348E-06 5.95684644E-10-4.67154394E-14 2 1.67755843E+04 8.48007179E+00 3.67359040E+00 2.01095175E-03 5.73021856E-06 3 -6.87117425E-09 2.54385734E-12 1.64449988E+04 1.60456433E+00 4 CH4 L 8/88C 1H 4 G 200.000 3500.000 1000.000 1 7.48514950E-02 1.33909467E-02-5.73285809E-06 1.22292535E-09-1.01815230E-13 2 -9.46834459E+03 1.84373180E+01 5.14987613E+00-1.36709788E-02 4.91800599E-05 3 -4.84743026E-08 1.66693956E-11-1.02466476E+04-4.64130376E+00 4 CO TPIS79C 1O 1 G 200.000 3500.000 1000.000 1 2.71518561E+00 2.06252743E-03-9.98825771E-07 2.30053008E-10-2.03647716E-14 2 -1.41518724E+04 7.81868772E+00 3.57953347E+00-6.10353680E-04 
1.01681433E-06 3 9.07005884E-10-9.04424499E-13-1.43440860E+04 3.50840928E+00 4 CO2 L 7/88C 1O 2 G 200.000 3500.000 1000.000 1 3.85746029E+00 4.41437026E-03-2.21481404E-06 5.23490188E-10-4.72084164E-14 2 -4.87591660E+04 2.27163806E+00 2.35677352E+00 8.98459677E-03-7.12356269E-06 3 2.45919022E-09-1.43699548E-13-4.83719697E+04 9.90105222E+00 4 HCO L12/89H 1C 1O 1 G 200.000 3500.000 1000.000 1 2.77217438E+00 4.95695526E-03-2.48445613E-06 5.89161778E-10-5.33508711E-14 2 4.01191815E+03 9.79834492E+00 4.22118584E+00-3.24392532E-03 1.37799446E-05 3 -1.33144093E-08 4.33768865E-12 3.83956496E+03 3.39437243E+00 4 CH2O L 8/88H 2C 1O 1 G 200.000 3500.000 1000.000 1 1.76069008E+00 9.20000082E-03-4.42258813E-06 1.00641212E-09-8.83855640E-14 2 -1.39958323E+04 1.36563230E+01 4.79372315E+00-9.90833369E-03 3.73220008E-05 3 -3.79285261E-08 1.31772652E-11-1.43089567E+04 6.02812900E-01 4 CH2OH GUNL93C 1H 3O 1 G 200.000 3500.000 1000.000 1 3.69266569E+00 8.64576797E-03-3.75101120E-06 7.87234636E-10-6.48554201E-14 2 -3.24250627E+03 5.81043215E+00 3.86388918E+00 5.59672304E-03 5.93271791E-06 3 -1.04532012E-08 4.36967278E-12-3.19391367E+03 5.47302243E+00 4 CH3O 121686C 1H 3O 1 G 300.00 3000.00 1000.000 1 0.03770799E+02 0.07871497E-01-0.02656384E-04 0.03944431E-08-0.02112616E-12 2 0.12783252E+03 0.02929575E+02 0.02106204E+02 0.07216595E-01 0.05338472E-04 3 -0.07377636E-07 0.02075610E-10 0.09786011E+04 0.13152177E+02 4 CH3OH L 8/88C 1H 4O 1 G 200.000 3500.000 1000.000 1 1.78970791E+00 1.40938292E-02-6.36500835E-06 1.38171085E-09-1.17060220E-13 2 -2.53748747E+04 1.45023623E+01 5.71539582E+00-1.52309129E-02 6.52441155E-05 3 -7.10806889E-08 2.61352698E-11-2.56427656E+04-1.50409823E+00 4 C2H L 1/91C 2H 1 G 200.000 3500.000 1000.000 1 3.16780652E+00 4.75221902E-03-1.83787077E-06 3.04190252E-10-1.77232770E-14 2 6.71210650E+04 6.63589475E+00 2.88965733E+00 1.34099611E-02-2.84769501E-05 3 2.94791045E-08-1.09331511E-11 6.68393932E+04 6.22296438E+00 4 C2H2 L 1/91C 2H 2 G 200.000 3500.000 1000.000 1 
4.14756964E+00 5.96166664E-03-2.37294852E-06 4.67412171E-10-3.61235213E-14 2 2.59359992E+04-1.23028121E+00 8.08681094E-01 2.33615629E-02-3.55171815E-05 3 2.80152437E-08-8.50072974E-12 2.64289807E+04 1.39397051E+01 4 C2H3 L 2/92C 2H 3 G 200.000 3500.000 1000.000 1 3.01672400E+00 1.03302292E-02-4.68082349E-06 1.01763288E-09-8.62607041E-14 2 3.46128739E+04 7.78732378E+00 3.21246645E+00 1.51479162E-03 2.59209412E-05 3 -3.57657847E-08 1.47150873E-11 3.48598468E+04 8.51054025E+00 4 C2H4 L 1/91C 2H 4 G 200.000 3500.000 1000.000 1 2.03611116E+00 1.46454151E-02-6.71077915E-06 1.47222923E-09-1.25706061E-13 2 4.93988614E+03 1.03053693E+01 3.95920148E+00-7.57052247E-03 5.70990292E-05 3 -6.91588753E-08 2.69884373E-11 5.08977593E+03 4.09733096E+00 4 C2H5 L12/92C 2H 5 G 200.000 3500.000 1000.000 1 1.95465642E+00 1.73972722E-02-7.98206668E-06 1.75217689E-09-1.49641576E-13 2 1.28575200E+04 1.34624343E+01 4.30646568E+00-4.18658892E-03 4.97142807E-05 3 -5.99126606E-08 2.30509004E-11 1.28416265E+04 4.70720924E+00 4 C2H6 L 8/88C 2H 6 G 200.000 3500.000 1000.000 1 1.07188150E+00 2.16852677E-02-1.00256067E-05 2.21412001E-09-1.90002890E-13 2 -1.14263932E+04 1.51156107E+01 4.29142492E+00-5.50154270E-03 5.99438288E-05 3 -7.08466285E-08 2.68685771E-11-1.15222055E+04 2.66682316E+00 4 CH2CO L 5/90C 2H 2O 1 G 200.000 3500.000 1000.000 1 4.51129732E+00 9.00359745E-03-4.16939635E-06 9.23345882E-10-7.94838201E-14 2 -7.55105311E+03 6.32247205E-01 2.13583630E+00 1.81188721E-02-1.73947474E-05 3 9.34397568E-09-2.01457615E-12-7.04291804E+03 1.22156480E+01 4 HCCO SRIC91H 1C 2O 1 G 300.00 4000.00 1000.000 1 0.56282058E+01 0.40853401E-02-0.15934547E-05 0.28626052E-09-0.19407832E-13 2 0.19327215E+05-0.39302595E+01 0.22517214E+01 0.17655021E-01-0.23729101E-04 3 0.17275759E-07-0.50664811E-11 0.20059449E+05 0.12490417E+02 4 HCCOH SRI91C 2O 1H 2 G 300.000 5000.000 1000.000 1 0.59238291E+01 0.67923600E-02-0.25658564E-05 0.44987841E-09-0.29940101E-13 2 0.72646260E+04-0.76017742E+01 0.12423733E+01 
0.31072201E-01-0.50866864E-04 3 0.43137131E-07-0.14014594E-10 0.80316143E+04 0.13874319E+02 4 H2CN 41687H 2C 1N 1 G 300.00 4000.000 1000.000 1 0.52097030E+01 0.29692911E-02-0.28555891E-06-0.16355500E-09 0.30432589E-13 2 0.27677109E+05-0.44444780E+01 0.28516610E+01 0.56952331E-02 0.10711400E-05 3 -0.16226120E-08-0.23511081E-12 0.28637820E+05 0.89927511E+01 4 HCN GRI/98H 1C 1N 1 G 200.000 6000.000 1000.000 1 0.38022392E+01 0.31464228E-02-0.10632185E-05 0.16619757E-09-0.97997570E-14 2 0.14407292E+05 0.15754601E+01 0.22589886E+01 0.10051170E-01-0.13351763E-04 3 0.10092349E-07-0.30089028E-11 0.14712633E+05 0.89164419E+01 4 HNO And93 H 1N 1O 1 G 200.000 6000.000 1000.000 1 0.29792509E+01 0.34944059E-02-0.78549778E-06 0.57479594E-10-0.19335916E-15 2 0.11750582E+05 0.86063728E+01 0.45334916E+01-0.56696171E-02 0.18473207E-04 3 -0.17137094E-07 0.55454573E-11 0.11548297E+05 0.17498417E+01 4 N L 6/88N 1 G 200.000 6000.000 1000.000 1 0.24159429E+01 0.17489065E-03-0.11902369E-06 0.30226245E-10-0.20360982E-14 2 0.56133773E+05 0.46496096E+01 0.25000000E+01 0.00000000E+00 0.00000000E+00 3 0.00000000E+00 0.00000000E+00 0.56104637E+05 0.41939087E+01 4 NNH T07/93N 2H 1 G 200.000 6000.000 1000.000 1 0.37667544E+01 0.28915082E-02-0.10416620E-05 0.16842594E-09-0.10091896E-13 2 0.28650697E+05 0.44705067E+01 0.43446927E+01-0.48497072E-02 0.20059459E-04 3 -0.21726464E-07 0.79469539E-11 0.28791973E+05 0.29779410E+01 4 N2O L 7/88N 2O 1 G 200.000 6000.000 1000.000 1 0.48230729E+01 0.26270251E-02-0.95850874E-06 0.16000712E-09-0.97752303E-14 2 0.80734048E+04-0.22017207E+01 0.22571502E+01 0.11304728E-01-0.13671319E-04 3 0.96819806E-08-0.29307182E-11 0.87417744E+04 0.10757992E+02 4 NH And94 N 1H 1 G 200.000 6000.000 1000.000 1 0.27836928E+01 0.13298430E-02-0.42478047E-06 0.78348501E-10-0.55044470E-14 2 0.42120848E+05 0.57407799E+01 0.34929085E+01 0.31179198E-03-0.14890484E-05 3 0.24816442E-08-0.10356967E-11 0.41880629E+05 0.18483278E+01 4 NH2 And89 N 1H 2 G 200.000 6000.000 1000.000 1 
0.28347421E+01 0.32073082E-02-0.93390804E-06 0.13702953E-09-0.79206144E-14 2 0.22171957E+05 0.65204163E+01 0.42040029E+01-0.21061385E-02 0.71068348E-05 3 -0.56115197E-08 0.16440717E-11 0.21885910E+05-0.14184248E+00 4 NH3 J 6/77N 1H 3 G 200.000 6000.000 1000.000 1 0.26344521E+01 0.56662560E-02-0.17278676E-05 0.23867161E-09-0.12578786E-13 2 -0.65446958E+04 0.65662928E+01 0.42860274E+01-0.46605230E-02 0.21718513E-04 3 -0.22808887E-07 0.82638046E-11-0.67417285E+04-0.62537277E+00 4 NO RUS 78N 1O 1 G 200.000 6000.000 1000.000 1 0.32606056E+01 0.11911043E-02-0.42917048E-06 0.69457669E-10-0.40336099E-14 2 0.99209746E+04 0.63693027E+01 0.42184763E+01-0.46389760E-02 0.11041022E-04 3 -0.93361354E-08 0.28035770E-11 0.98446230E+04 0.22808464E+01 4 NO2 L 7/88N 1O 2 G 200.000 6000.000 1000.000 1 0.48847542E+01 0.21723956E-02-0.82806906E-06 0.15747510E-09-0.10510895E-13 2 0.23164983E+04-0.11741695E+00 0.39440312E+01-0.15854290E-02 0.16657812E-04 3 -0.20475426E-07 0.78350564E-11 0.28966179E+04 0.63119917E+01 4 HCNO BDEA94H 1N 1C 1O 1G 300.000 5000.000 1382.000 1 6.59860456E+00 3.02778626E-03-1.07704346E-06 1.71666528E-10-1.01439391E-14 2 1.79661339E+04-1.03306599E+01 2.64727989E+00 1.27505342E-02-1.04794236E-05 3 4.41432836E-09-7.57521466E-13 1.92990252E+04 1.07332972E+01 4 HOCN BDEA94H 1N 1C 1O 1G 300.000 5000.000 1368.000 1 5.89784885E+00 3.16789393E-03-1.11801064E-06 1.77243144E-10-1.04339177E-14 2 -3.70653331E+03-6.18167825E+00 3.78604952E+00 6.88667922E-03-3.21487864E-06 3 5.17195767E-10 1.19360788E-14-2.82698400E+03 5.63292162E+00 4 HNCO BDEA94H 1N 1C 1O 1G 300.000 5000.000 1478.000 1 6.22395134E+00 3.17864004E-03-1.09378755E-06 1.70735163E-10-9.95021955E-15 2 -1.66599344E+04-8.38224741E+00 3.63096317E+00 7.30282357E-03-2.28050003E-06 3 -6.61271298E-10 3.62235752E-13-1.55873636E+04 6.19457727E+00 4 NCO EA 93 N 1C 1O 1 G 200.000 6000.000 1000.000 1 0.51521845E+01 0.23051761E-02-0.88033153E-06 0.14789098E-09-0.90977996E-14 2 0.14004123E+05-0.25442660E+01 0.28269308E+01 
0.88051688E-02-0.83866134E-05 3 0.48016964E-08-0.13313595E-11 0.14682477E+05 0.95504646E+01 4 CN HBH92 C 1N 1 G 200.000 6000.000 1000.000 1 0.37459805E+01 0.43450775E-04 0.29705984E-06-0.68651806E-10 0.44134173E-14 2 0.51536188E+05 0.27867601E+01 0.36129351E+01-0.95551327E-03 0.21442977E-05 3 -0.31516323E-09-0.46430356E-12 0.51708340E+05 0.39804995E+01 4 HCNN SRI/94C 1N 2H 1 G 300.000 5000.000 1000.000 1 0.58946362E+01 0.39895959E-02-0.15982380E-05 0.29249395E-09-0.20094686E-13 2 0.53452941E+05-0.51030502E+01 0.25243194E+01 0.15960619E-01-0.18816354E-04 3 0.12125540E-07-0.32357378E-11 0.54261984E+05 0.11675870E+02 4 N2 121286N 2 G 300.000 5000.000 1000.000 1 0.02926640E+02 0.14879768E-02-0.05684760E-05 0.10097038E-09-0.06753351E-13 2 -0.09227977E+04 0.05980528E+02 0.03298677E+02 0.14082404E-02-0.03963222E-04 3 0.05641515E-07-0.02444854E-10-0.10208999E+04 0.03950372E+02 4 AR 120186AR 1 G 300.000 5000.000 1000.000 1 0.02500000E+02 0.00000000E+00 0.00000000E+00 0.00000000E+00 0.00000000E+00 2 -0.07453750E+04 0.04366000E+02 0.02500000E+02 0.00000000E+00 0.00000000E+00 3 0.00000000E+00 0.00000000E+00-0.07453750E+04 0.04366000E+02 4 C3H8 L 4/85C 3H 8 G 300.000 5000.000 1000.000 1 0.75341368E+01 0.18872239E-01-0.62718491E-05 0.91475649E-09-0.47838069E-13 2 -0.16467516E+05-0.17892349E+02 0.93355381E+00 0.26424579E-01 0.61059727E-05 3 -0.21977499E-07 0.95149253E-11-0.13958520E+05 0.19201691E+02 4 C3H7 L 9/84C 3H 7 G 300.000 5000.000 1000.000 1 0.77026987E+01 0.16044203E-01-0.52833220E-05 0.76298590E-09-0.39392284E-13 2 0.82984336E+04-0.15480180E+02 0.10515518E+01 0.25991980E-01 0.23800540E-05 3 -0.19609569E-07 0.93732470E-11 0.10631863E+05 0.21122559E+02 4 CH3CHO L 8/88C 2H 4O 1 G 200.000 6000.000 1000.000 1 0.54041108E+01 0.11723059E-01-0.42263137E-05 0.68372451E-09-0.40984863E-13 2 -0.22593122E+05-0.34807917E+01 0.47294595E+01-0.31932858E-02 0.47534921E-04 3 -0.57458611E-07 0.21931112E-10-0.21572878E+05 0.41030159E+01 4 CH2CHO SAND86O 1H 3C 2 G 300.000 5000.000 1000.000 1 
0.05975670E+02 0.08130591E-01-0.02743624E-04 0.04070304E-08-0.02176017E-12 2 0.04903218E+04-0.05045251E+02 0.03409062E+02 0.10738574E-01 0.01891492E-04 3 -0.07158583E-07 0.02867385E-10 0.15214766E+04 0.09558290E+02 4 END \\ \\ \\ This is the tran file \\ \\ AR 0 136.500 3.330 0.000 0.000 0.000 C 0 71.400 3.298 0.000 0.000 0.000 ! * C2 1 97.530 3.621 0.000 1.760 4.000 C2O 1 232.400 3.828 0.000 0.000 1.000 ! * CN2 1 232.400 3.828 0.000 0.000 1.000 ! OIS C2H 1 209.000 4.100 0.000 0.000 2.500 C2H2 1 209.000 4.100 0.000 0.000 2.500 C2H2OH 2 224.700 4.162 0.000 0.000 1.000 ! * C2H3 2 209.000 4.100 0.000 0.000 1.000 ! * C2H4 2 280.800 3.971 0.000 0.000 1.500 C2H5 2 252.300 4.302 0.000 0.000 1.500 C2H6 2 252.300 4.302 0.000 0.000 1.500 C2N 1 232.400 3.828 0.000 0.000 1.000 ! OIS C2N2 1 349.000 4.361 0.000 0.000 1.000 ! OIS C3H2 2 209.000 4.100 0.000 0.000 1.000 ! * C3H4 1 252.000 4.760 0.000 0.000 1.000 C3H6 2 266.800 4.982 0.000 0.000 1.000 C3H7 2 266.800 4.982 0.000 0.000 1.000 C4H6 2 357.000 5.180 0.000 0.000 1.000 I*C3H7 2 266.800 4.982 0.000 0.000 1.000 N*C3H7 2 266.800 4.982 0.000 0.000 1.000 C3H8 2 266.800 4.982 0.000 0.000 1.000 C4H 1 357.000 5.180 0.000 0.000 1.000 C4H2 1 357.000 5.180 0.000 0.000 1.000 C4H2OH 2 224.700 4.162 0.000 0.000 1.000 ! * C4H8 2 357.000 5.176 0.000 0.000 1.000 C4H9 2 357.000 5.176 0.000 0.000 1.000 I*C4H9 2 357.000 5.176 0.000 0.000 1.000 C5H2 1 357.000 5.180 0.000 0.000 1.000 C5H3 1 357.000 5.180 0.000 0.000 1.000 C6H2 1 357.000 5.180 0.000 0.000 1.000 C6H5 2 412.300 5.349 0.000 0.000 1.000 ! JAM C6H5O 2 450.000 5.500 0.000 0.000 1.000 ! JAM C5H5OH 2 450.000 5.500 0.000 0.000 1.000 ! JAM C6H6 2 412.300 5.349 0.000 0.000 1.000 ! SVE C6H7 2 412.300 5.349 0.000 0.000 1.000 ! JAM CH 1 80.000 2.750 0.000 0.000 0.000 CH2 1 144.000 3.800 0.000 0.000 0.000 CH2(S) 1 144.000 3.800 0.000 0.000 0.000 CH2* 1 144.000 3.800 0.000 0.000 0.000 CH2CHCCH 2 357.000 5.180 0.000 0.000 1.000 ! JAM CH2CHCCH2 2 357.000 5.180 0.000 0.000 1.000 ! 
JAM CH2CHCH2 2 260.000 4.850 0.000 0.000 1.000 ! JAM CH2CHCHCH 2 357.000 5.180 0.000 0.000 1.000 ! JAM CH2CHCHCH2 2 357.000 5.180 0.000 0.000 1.000 ! JAM CH2CO 2 436.000 3.970 0.000 0.000 2.000 CH2O 2 498.000 3.590 0.000 0.000 2.000 CH2OH 2 417.000 3.690 1.700 0.000 2.000 CH3 1 144.000 3.800 0.000 0.000 0.000 CH3CC 2 252.000 4.760 0.000 0.000 1.000 ! JAM CH3CCCH2 2 357.000 5.180 0.000 0.000 1.000 ! JAM CH3CCCH3 2 357.000 5.180 0.000 0.000 1.000 ! JAM CH3CCH2 2 260.000 4.850 0.000 0.000 1.000 ! JAM CH3CHCH 2 260.000 4.850 0.000 0.000 1.000 ! JAM CH3CH2CCH 2 357.000 5.180 0.000 0.000 1.000 ! JAM CH3CHO 2 436.000 3.970 0.000 0.000 2.000 CH2CHO 2 436.000 3.970 0.000 0.000 2.000 CH3CO 2 436.000 3.970 0.000 0.000 2.000 CH3O 2 417.000 3.690 1.700 0.000 2.000 CH3OH 2 481.800 3.626 0.000 0.000 1.000 ! SVE CH4 2 141.400 3.746 0.000 2.600 13.000 CH4O 2 417.000 3.690 1.700 0.000 2.000 CN 1 75.000 3.856 0.000 0.000 1.000 ! OIS CNC 1 232.400 3.828 0.000 0.000 1.000 ! OIS CNN 1 232.400 3.828 0.000 0.000 1.000 ! OIS CO 1 98.100 3.650 0.000 1.950 1.800 CO2 1 244.000 3.763 0.000 2.650 2.100 H 0 145.000 2.050 0.000 0.000 0.000 H2C4O 2 357.000 5.180 0.000 0.000 1.000 ! JAM H2 1 38.000 2.920 0.000 0.790 280.000 H2CCCCH 2 357.000 5.180 0.000 0.000 1.000 ! JAM H2CCCCH2 2 357.000 5.180 0.000 0.000 1.000 ! JAM H2CCCH 2 252.000 4.760 0.000 0.000 1.000 ! JAM H2CN 1 569.000 3.630 0.000 0.000 1.000 ! os/jm H2NO 2 116.700 3.492 0.000 0.000 1.000 ! JAM H2O 2 572.400 2.605 1.844 0.000 4.000 H2O2 2 107.400 3.458 0.000 0.000 3.800 HC2N2 1 349.000 4.361 0.000 0.000 1.000 ! OIS HCCHCCH 2 357.000 5.180 0.000 0.000 1.000 ! JAM HCCO 2 150.000 2.500 0.000 0.000 1.000 ! * HCNN 2 150.000 2.500 0.000 0.000 1.000 ! * HCCOH 2 436.000 3.970 0.000 0.000 2.000 HCN 1 569.000 3.630 0.000 0.000 1.000 ! OIS HCO 2 498.000 3.590 0.000 0.000 0.000 HE 0 10.200 2.576 0.000 0.000 0.000 ! * HCNO 2 232.400 3.828 0.000 0.000 1.000 ! JAM HOCN 2 232.400 3.828 0.000 0.000 1.000 ! JAM HNCO 2 232.400 3.828 0.000 0.000 1.000 ! 
OIS HNNO 2 232.400 3.828 0.000 0.000 1.000 ! * HNO 2 116.700 3.492 0.000 0.000 1.000 ! * HNOH 2 116.700 3.492 0.000 0.000 1.000 ! JAM HO2 2 107.400 3.458 0.000 0.000 1.000 ! * N 0 71.400 3.298 0.000 0.000 0.000 ! * N2 1 97.530 3.621 0.000 1.760 4.000 N2H2 2 71.400 3.798 0.000 0.000 1.000 ! * N2H3 2 200.000 3.900 0.000 0.000 1.000 ! * N2H4 2 205.000 4.230 0.000 4.260 1.500 N2O 1 232.400 3.828 0.000 0.000 1.000 ! * NCN 1 232.400 3.828 0.000 0.000 1.000 ! OIS NCO 1 232.400 3.828 0.000 0.000 1.000 ! OIS NH 1 80.000 2.650 0.000 0.000 4.000 NH2 2 80.000 2.650 0.000 2.260 4.000 NH3 2 481.000 2.920 1.470 0.000 10.000 NNH 2 71.400 3.798 0.000 0.000 1.000 ! * NO 1 97.530 3.621 0.000 1.760 4.000 NCNO 2 232.400 3.828 0.000 0.000 1.000 ! OIS NO2 2 200.000 3.500 0.000 0.000 1.000 ! * O 0 80.000 2.750 0.000 0.000 0.000 O2 1 107.400 3.458 0.000 1.600 3.800 OH 1 80.000 2.750 0.000 0.000 0.000 #endif
philosophers.c
/* philosophers.c
 * Dining philosophers with exactly NUM_PHIL (5) philosophers, one OpenMP
 * thread per philosopher and one OpenMP lock per chopstick.
 *
 * Deadlock is avoided by imposing a single global acquisition order on the
 * chopstick locks: every philosopher picks up its higher-numbered chopstick
 * first, so no cycle of waiting threads can form.
 */

#include <omp.h>     /* OpenMP locks, threads, barrier */
#include <stdio.h>   /* printf */
#include <stdlib.h>  /* NULL */
#include <unistd.h>  /* usleep */

#define NUM_PHIL 5   /* number of philosophers (== number of chopsticks) */
#define MEALS 100    /* meals eaten by each philosopher */

/* One lock per chopstick, shared by all philosopher threads. */
static omp_lock_t chopsticks[NUM_PHIL];

/* Body of each philosopher thread.
 * Waits until every philosopher has started, then eats MEALS meals; each
 * meal requires holding the two chopsticks adjacent to this philosopher. */
void philosopher() {
  /* Rendezvous: nobody eats until all NUM_PHIL threads are running. */
  #pragma omp barrier

  const int id = omp_get_thread_num();

  /* Choose which chopstick this philosopher grabs first and which second.
   * Philosophers 0..NUM_PHIL-2 grab chopstick id+1 first, then id; the last
   * philosopher grabs its own chopstick (NUM_PHIL-1) first, then chopstick 0.
   * In every case the higher-numbered lock is acquired first, which yields a
   * consistent global ordering and therefore no deadlock. */
  int first_stick;
  int second_stick;
  if (id == NUM_PHIL - 1) {
    first_stick = id;
    second_stick = 0;
  } else {
    first_stick = id + 1;
    second_stick = id;
  }

  /* Eat: grab both chopsticks, dine briefly, then release them. */
  for (int meal = 0; meal < MEALS; meal++) {
    omp_set_lock(&chopsticks[first_stick]);
    omp_set_lock(&chopsticks[second_stick]);
    printf("philosopher %d is eating\n", id);
    usleep(100);
    omp_unset_lock(&chopsticks[first_stick]);
    omp_unset_lock(&chopsticks[second_stick]);
  }
}

/* Entry point: set up the chopstick locks, run the philosophers, clean up. */
int main(int argc, char ** argv) {
  /* Create one lock per chopstick. */
  for (int k = 0; k < NUM_PHIL; k++)
    omp_init_lock(&chopsticks[k]);

  /* One philosopher per thread; the parallel region joins implicitly,
   * so all philosophers have finished when it ends. */
  #pragma omp parallel num_threads(NUM_PHIL)
  {
    philosopher();
  }

  /* Tear down the locks. */
  for (int k = 0; k < NUM_PHIL; k++)
    omp_destroy_lock(&chopsticks[k]);

  return 0;
}
GB_matvec_type_name.c
//------------------------------------------------------------------------------ // GB_matvec_type_name: return the name of the type of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GB_matvec_type_name // return the name of the type of a matrix ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). const GrB_Matrix A, // matrix to query GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL (type_name) ; ASSERT_MATRIX_OK (A, "A for type_name", GB0) ; //-------------------------------------------------------------------------- // return the type //-------------------------------------------------------------------------- memcpy (type_name, A->type->name, GxB_MAX_NAME_LEN) ; #pragma omp flush return (GrB_SUCCESS) ; }
nowait-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ // Some threads may finish the for loop early and execute errors = dt[9]+1 // while another thread may still be simultaneously executing // the for worksharing region by writing to d[9], // which may cause a data race. // This is a good test for dynamic tools since the data race does not always happen at runtime. // // Liao, source paper: Ma Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013 #include <stdio.h> #include <assert.h> int main() { int i,error; int len = 1000; int a[1000], b=5; for (i=0; i<len; i++) a[i]= i; #pragma omp parallel shared(b, error) { #pragma omp for nowait for(i = 0; i < len; i++) a[i] = b + a[i]*5; #pragma omp single error = a[9] + 1; } printf ("error = %d\n", error); // assert (error==51); return 0; }
GB_unop__trunc_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__trunc_fc64_fc64)
// op(A') function: GB (_unop_tran__trunc_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = GB_ctrunc (aij)

// NOTE: the GB_* macros below are not only used by the code in this file;
// they are also consumed by the textually-included GB_unop_transpose.c in
// the transpose kernel at the bottom, so their exact form matters.

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_ctrunc (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = GB_ctrunc (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TRUNC || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply cij = GB_ctrunc (aij) to all anz entries of Ax, writing into Cx,
// in parallel across nthreads.  When Ab is non-NULL, A is bitmap and only
// entries with Ab [p] != 0 are present (and therefore processed).

GrB_Info GB (_unop_apply__trunc_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse/hypersparse case: every one of the anz values is live
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_ctrunc (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries absent from the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_ctrunc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire kernel body lives in GB_unop_transpose.c, which is included
// textually and expands in terms of the GB_* macros defined above.

GrB_Info GB (_unop_tran__trunc_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
my_lib.c
#include <TH/TH.h>
#include <assert.h>
#include <float.h>  /* FLT_MAX was previously only available transitively */
#include <math.h>   /* exp, log */

/* Segment layout shared by all functions here: prefix_sum[i] is the exclusive
 * end offset of jagged segment i in the flat `values` buffer, so segment i
 * covers [ps[i-1], ps[i]) with ps[-1] taken as 0. */

/* For each jagged segment of `values`, write the index of its maximum element
 * into `output` (resized to match prefix_sum).  Segments must be non-empty.
 * Returns 1 on success. */
int jagged_argmax_forward(THFloatTensor *values, THLongTensor *prefix_sum, THLongTensor *output)
{
  values = THFloatTensor_newContiguous(values);
  THLongTensor_resizeAs(output, prefix_sum);

  float *input_data_base = values->storage->data + values->storageOffset;  /* fixed stray ';;' */
  long *ps = prefix_sum->storage->data + prefix_sum->storageOffset;
  long *p_out = output->storage->data + output->storageOffset;

  long bsize = (long)prefix_sum->size[0];
  long i, d;
#pragma omp parallel for private(i, d)
  for (i = 0; i < bsize; i++)
  {
    long offset = (i == 0) ? 0 : ps[i - 1];
    long n_ele = ps[i] - offset;
    float* input_data = input_data_base + offset;

    float max_input = -FLT_MAX;
    long max_id = -1;
    for (d = 0; d < n_ele; d++)
      if (input_data[d] > max_input)
      {
        max_input = input_data[d];
        max_id = d;
      }
    /* max_id stays -1 only if the segment is empty, which is a caller bug */
    assert(max_id >= 0);
    p_out[i] = max_id;
  }

  THFloatTensor_free(values);
  return 1;
}

/* Like jagged_argmax_forward, but also records the maximum value itself:
 * per segment, vmax gets the max value and idxes gets its index.
 * Returns 1 on success. */
int jagged_max_forward(THFloatTensor *values, THLongTensor *prefix_sum, THFloatTensor *vmax, THLongTensor *idxes)
{
  int64_t inputsize = prefix_sum->size[0];
  values = THFloatTensor_newContiguous(values);
  THLongTensor_resize1d(idxes, inputsize);
  THFloatTensor_resize1d(vmax, inputsize);

  float *input_data_base = values->storage->data + values->storageOffset;
  long *ps = prefix_sum->storage->data + prefix_sum->storageOffset;
  float *p_maxv = vmax->storage->data + vmax->storageOffset;
  long *p_i = idxes->storage->data + idxes->storageOffset;

  long bsize = (long)prefix_sum->size[0];
  long i, d;
#pragma omp parallel for private(i, d)
  for (i = 0; i < bsize; i++)
  {
    long offset = (i == 0) ? 0 : ps[i - 1];
    long n_ele = ps[i] - offset;
    float* input_data = input_data_base + offset;

    float max_input = -FLT_MAX;
    long max_id = -1;
    for (d = 0; d < n_ele; d++)
      if (input_data[d] > max_input)
      {
        max_input = input_data[d];
        max_id = d;
      }
    assert(max_id >= 0);
    p_i[i] = max_id;
    p_maxv[i] = max_input;
  }

  THFloatTensor_free(values);
  return 1;
}

/* Numerically-stable log-softmax over each jagged segment of `logits`,
 * written into `output` (resized to match logits).  Uses the standard
 * shift-by-max trick before exponentiating.  Returns 1 on success. */
int jagged_log_softmax_forward(THFloatTensor *logits, THLongTensor *prefix_sum, THFloatTensor *output)
{
  logits = THFloatTensor_newContiguous(logits);
  THFloatTensor_resizeAs(output, logits);

  float *input_data_base = logits->storage->data + logits->storageOffset;
  long *ps = prefix_sum->storage->data + prefix_sum->storageOffset;
  float *output_data_base = output->storage->data + output->storageOffset;

  uint64_t bsize = (uint64_t)prefix_sum->size[0];
  uint64_t i, d;
#pragma omp parallel for private(i, d)
  for (i = 0; i < bsize; i++)
  {
    long offset = (i == 0) ? 0 : ps[i - 1];
    float* input_data = input_data_base + offset;
    float* output_data = output_data_base + offset;
    long n_ele = ps[i] - offset;

    float max_input = -FLT_MAX;
    for (d = 0; d < n_ele; d++)
      max_input = THMax(max_input, input_data[d]);

    /* accumulate in double to reduce rounding error over long segments */
    double logsum = 0;
    for (d = 0; d < n_ele; d++)
      logsum += exp(input_data[d] - max_input);
    logsum = max_input + log(logsum);

    for (d = 0; d < n_ele; d++)
      output_data[d] = input_data[d] - logsum;
  }

  THFloatTensor_free(logits);
  return 1;
}

/* Backward pass of jagged_log_softmax_forward:
 * grad_input = grad_output - exp(output) * sum(grad_output) per segment.
 * Returns 1 on success. */
int jagged_log_softmax_backward(THFloatTensor *output, THFloatTensor *grad_output, THLongTensor *prefix_sum, THFloatTensor *grad_input)
{
  grad_output = THFloatTensor_newContiguous(grad_output);
  output = THFloatTensor_newContiguous(output);
  THFloatTensor_resizeAs(grad_input, grad_output);

  float *output_data_base = output->storage->data + output->storageOffset;
  float *gradOutput_data_base = grad_output->storage->data + grad_output->storageOffset;
  long *ps = prefix_sum->storage->data + prefix_sum->storageOffset;
  float *gradInput_data_base = grad_input->storage->data + grad_input->storageOffset;

  uint64_t bsize = (uint64_t)prefix_sum->size[0];
  uint64_t i, d;
#pragma omp parallel for private(i, d)
  for (i = 0; i < bsize; i++)
  {
    long offset = (i == 0) ? 0 : ps[i - 1];
    float *gradInput_data = gradInput_data_base + offset;
    float *output_data = output_data_base + offset;
    float *gradOutput_data = gradOutput_data_base + offset;

    double sum = 0;
    long n_ele = ps[i] - offset;
    for (d = 0; d < n_ele; d++)
      sum += gradOutput_data[d];

    for (d = 0; d < n_ele; d++)
      gradInput_data[d] = gradOutput_data[d] - exp(output_data[d]) * sum;
  }

  THFloatTensor_free(grad_output);
  THFloatTensor_free(output);
  return 1;
}

/* Symmetric (Laplacian) normalization of a sparse matrix in COO form:
 * v[i] /= norm[row[i]] * norm[col[i]].  `indices` is a 2 x nnz tensor whose
 * first row holds row indices and second row column indices.
 * Returns 1 on success. */
int graph_laplacian_norm(THLongTensor *indices, THFloatTensor *values, THFloatTensor *norm)
{
  uint64_t nnz = (uint64_t)values->size[0];
  long *row_indices = indices->storage->data + indices->storageOffset;
  long *col_indices = row_indices + indices->stride[0];
  float *p_v = values->storage->data + values->storageOffset;
  float *p_norm = norm->storage->data + norm->storageOffset;

  uint64_t i;
#pragma omp parallel for private(i)
  for (i = 0; i < nnz; i++)
  {
    /* renamed from `norm`, which shadowed the tensor parameter */
    float scale = p_norm[ row_indices[i] ] * p_norm[ col_indices[i] ];
    p_v[i] /= scale;
  }
  return 1;
}

/* Row (degree) normalization of a sparse matrix in COO form:
 * v[i] /= norm[row[i]].  Returns 1 on success. */
int graph_degree_norm(THLongTensor *indices, THFloatTensor *values, THFloatTensor *norm)
{
  uint64_t nnz = (uint64_t)values->size[0];
  long *row_indices = indices->storage->data + indices->storageOffset;
  float *p_v = values->storage->data + values->storageOffset;
  float *p_norm = norm->storage->data + norm->storageOffset;

  uint64_t i;
#pragma omp parallel for private(i)
  for (i = 0; i < nnz; i++)
  {
    /* renamed from `norm`, which shadowed the tensor parameter */
    float scale = p_norm[ row_indices[i] ];
    p_v[i] /= scale;
  }
  return 1;
}
// ===== residual_based_adjoint_bossak_scheme.h =====
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:
//

#if !defined(KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED)
#define KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED

// System includes
#include <vector>
#include <string>
#include <unordered_set>
#include <functional>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/checks.h"
#include "includes/kratos_parameters.h"
#include "solving_strategies/schemes/scheme.h"
#include "response_functions/adjoint_response_function.h"
#include "utilities/variable_utils.h"
#include "utilities/indirect_scalar.h"
#include "utilities/adjoint_extensions.h"

namespace Kratos
{
///@name Kratos Classes
///@{

/// A scheme for dynamic adjoint equations, using Bossak time integration.
/**
 * It can be used for either first- or second-order time derivatives. Elements
 * and conditions must provide a specialization of AdjointExtensions via their
 * data value container, which allows the scheme to operate independently of
 * the variable arrangements in the element or condition.
 */
template <class TSparseSpace, class TDenseSpace>
class ResidualBasedAdjointBossakScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedAdjointBossakScheme);

    typedef Scheme<TSparseSpace, TDenseSpace> BaseType;

    typedef typename BaseType::TSystemMatrixType SystemMatrixType;
    typedef typename BaseType::TSystemVectorType SystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    /** Reads "alpha_bossak" from Settings (default -0.3) after validating
     *  against the default parameter block; takes shared ownership of the
     *  response function used to build adjoint right-hand sides. */
    ResidualBasedAdjointBossakScheme(
        Parameters Settings,
        AdjointResponseFunction::Pointer pResponseFunction
        )
        : mpResponseFunction(pResponseFunction)
    {
        Parameters default_parameters(R"({
            "name" : "adjoint_bossak",
            "scheme_type" : "bossak",
            "alpha_bossak" : -0.3
        })");
        Settings.ValidateAndAssignDefaults(default_parameters);
        mBossak.Alpha = Settings["alpha_bossak"].GetDouble();
    }

    /// Destructor.
    ~ResidualBasedAdjointBossakScheme() override
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Allocate per-thread scratch storage and publish BOSSAK_ALPHA.
    void Initialize(ModelPart& rModelPart) override
    {
        KRATOS_TRY;

        BaseType::Initialize(rModelPart);

        // Allocate auxiliary memory.
        // One scratch slot per OpenMP thread; thread k only ever touches
        // index k of these vectors.
        int num_threads = OpenMPUtils::GetNumThreads();
        mLeftHandSide.resize(num_threads);
        mResponseGradient.resize(num_threads);
        mFirstDerivsLHS.resize(num_threads);
        mFirstDerivsResponseGradient.resize(num_threads);
        mSecondDerivsLHS.resize(num_threads);
        mSecondDerivsResponseGradient.resize(num_threads);
        mAdjointValuesVector.resize(num_threads);
        mAdjointIndirectVector2.resize(num_threads);
        mAdjointIndirectVector3.resize(num_threads);
        mAuxAdjointIndirectVector1.resize(num_threads);

        InitializeNodeNeighbourCount(rModelPart.Nodes());

        rModelPart.GetProcessInfo()[BOSSAK_ALPHA] = mBossak.Alpha;

        KRATOS_CATCH("");
    }

    /// Recompute the Bossak integration constants for the current (possibly
    /// variable) time step, and refresh each node's neighbour-element count.
    void InitializeSolutionStep(ModelPart& rModelPart,
                                SystemMatrixType& rA,
                                SystemVectorType& rDx,
                                SystemVectorType& rb) override
    {
        KRATOS_TRY;

        BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        const auto& r_current_process_info = rModelPart.GetProcessInfo();
        mBossak = CalculateBossakConstants(mBossak.Alpha, GetTimeStep(r_current_process_info));

        this->CalculateNodeNeighbourCount(rModelPart);

        KRATOS_CATCH("");
    }

    /// After the step is solved, update the auxiliary adjoint variable used
    /// by the next (earlier-in-time) step.
    void FinalizeSolutionStep(ModelPart& rModelPart,
                              SystemMatrixType& rA,
                              SystemVectorType& rDx,
                              SystemVectorType& rb) override
    {
        KRATOS_TRY;

        BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);

        this->UpdateAuxiliaryVariable(rModelPart);

        KRATOS_CATCH("");
    }

    /// Apply the solved increment rDx to the adjoint dofs, then update the
    /// time-integration adjoint variables derived from them.
    void Update(ModelPart& rModelPart,
                DofsArrayType& rDofSet,
                SystemMatrixType& rA,
                SystemVectorType& rDx,
                SystemVectorType& rb) override
    {
        KRATOS_TRY;
        // Update degrees of freedom: adjoint variables associated to the
        // residual of the physical problem.
        this->mpDofUpdater->UpdateDofs(rDofSet, rDx);
        // Update adjoint variables associated to time integration.
        this->UpdateTimeSchemeAdjoints(rModelPart);
        KRATOS_CATCH("");
    }

    /// Assemble the full elemental adjoint system: steady gradient, first-
    /// and second-derivative contributions, old-step terms, and the residual
    /// contribution from the current adjoint values.
    void CalculateSystemContributions(Element& rCurrentElement,
                                      LocalSystemMatrixType& rLHS_Contribution,
                                      LocalSystemVectorType& rRHS_Contribution,
                                      Element::EquationIdVectorType& rEquationId,
                                      const ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;

        const auto k = OpenMPUtils::ThisThread();

        rCurrentElement.GetValuesVector(mAdjointValuesVector[k]);
        const auto local_size = mAdjointValuesVector[k].size();
        if (rRHS_Contribution.size() != local_size)
        {
            rRHS_Contribution.resize(local_size, false);
        }
        if (rLHS_Contribution.size1() != local_size ||
            rLHS_Contribution.size2() != local_size)
        {
            rLHS_Contribution.resize(local_size, local_size, false);
        }
        this->CheckAndResizeThreadStorage(local_size);

        // Order matters: the gradient call initializes (=) the LHS/RHS, the
        // derivative calls accumulate (+=/-=) onto them.
        this->CalculateGradientContributions(rCurrentElement, rLHS_Contribution,
                                             rRHS_Contribution, rCurrentProcessInfo);

        this->CalculateFirstDerivativeContributions(
            rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);

        this->CalculateSecondDerivativeContributions(
            rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);

        this->CalculatePreviousTimeStepContributions(
            rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);

        this->CalculateResidualLocalContributions(
            rCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);

        rCurrentElement.EquationIdVector(rEquationId, rCurrentProcessInfo);

        KRATOS_CATCH("");
    }

    /// LHS-only variant; delegates to CalculateSystemContributions and
    /// discards the RHS.
    void CalculateLHSContribution(Element& rCurrentElement,
                                  LocalSystemMatrixType& rLHS_Contribution,
                                  Element::EquationIdVectorType& rEquationId,
                                  const ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        LocalSystemVectorType RHS_Contribution;
        CalculateSystemContributions(rCurrentElement, rLHS_Contribution,
                                     RHS_Contribution, rEquationId, rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    /// Condition contributions: forwards directly to the condition's own
    /// local system (no Bossak terms are added here).
    void CalculateSystemContributions(Condition& rCurrentCondition,
                                      LocalSystemMatrixType& rLHS_Contribution,
                                      LocalSystemVectorType& rRHS_Contribution,
                                      Condition::EquationIdVectorType& rEquationId,
                                      const ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        // NOT TESTED !!!
        rCurrentCondition.CalculateLocalSystem(
            rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    /// LHS-only condition variant; delegates and discards the RHS.
    void CalculateLHSContribution(Condition& rCurrentCondition,
                                  LocalSystemMatrixType& rLHS_Contribution,
                                  Condition::EquationIdVectorType& rEquationId,
                                  const ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY;
        LocalSystemVectorType RHS_Contribution;
        CalculateSystemContributions(rCurrentCondition, rLHS_Contribution,
                                     RHS_Contribution, rEquationId, rCurrentProcessInfo);
        KRATOS_CATCH("");
    }

    /// Release internal dof-updater resources (relevant for distributed runs).
    void Clear() override
    {
        this->mpDofUpdater->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedAdjointBossakScheme";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    // Precomputed Bossak time-integration coefficients; C0-C7 are derived
    // from Alpha/Beta/Gamma and the step size in CalculateBossakConstants.
    struct BossakConstants
    {
        double Alpha;
        double Beta;
        double Gamma;
        double C0;
        double C1;
        double C2;
        double C3;
        double C4;
        double C5;
        double C6;
        double C7;
    };

    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    BossakConstants mBossak;

    typename TSparseSpace::DofUpdaterPointerType mpDofUpdater =
        TSparseSpace::CreateDofUpdater();

    AdjointResponseFunction::Pointer mpResponseFunction;

    // All vectors below are indexed by the OpenMP thread id (thread-local
    // scratch, sized in Initialize).
    std::vector<LocalSystemMatrixType> mLeftHandSide;
    std::vector<LocalSystemVectorType> mResponseGradient;
    std::vector<LocalSystemMatrixType> mFirstDerivsLHS;
    std::vector<LocalSystemVectorType> mFirstDerivsResponseGradient;
    std::vector<LocalSystemMatrixType> mSecondDerivsLHS;
    std::vector<LocalSystemVectorType> mSecondDerivsResponseGradient;
    std::vector<LocalSystemVectorType> mAdjointValuesVector;
    std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector2;
    std::vector<std::vector<IndirectScalar<double>>> mAdjointIndirectVector3;
    std::vector<std::vector<IndirectScalar<double>>> mAuxAdjointIndirectVector1;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    /// Initialize LHS/RHS from the steady-state residual gradient and the
    /// response-function gradient (RHS = -dJ/du).
    void CalculateGradientContributions(Element& rCurrentElement,
                                        LocalSystemMatrixType& rLHS_Contribution,
                                        LocalSystemVectorType& rRHS_Contribution,
                                        const ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        rCurrentElement.CalculateLeftHandSide(mLeftHandSide[k], rCurrentProcessInfo);
        this->mpResponseFunction->CalculateGradient(
            rCurrentElement, mLeftHandSide[k], mResponseGradient[k], rCurrentProcessInfo);
        noalias(rLHS_Contribution) = mLeftHandSide[k];
        noalias(rRHS_Contribution) = -1. * mResponseGradient[k];
    }

    /// Accumulate the first-time-derivative terms, scaled by C6 = gamma/(beta*dt).
    void CalculateFirstDerivativeContributions(Element& rCurrentElement,
                                               LocalSystemMatrixType& rLHS_Contribution,
                                               LocalSystemVectorType& rRHS_Contribution,
                                               const ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        rCurrentElement.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], rCurrentProcessInfo);
        mpResponseFunction->CalculateFirstDerivativesGradient(
            rCurrentElement, mFirstDerivsLHS[k],
            mFirstDerivsResponseGradient[k], rCurrentProcessInfo);
        noalias(rLHS_Contribution) += mBossak.C6 * mFirstDerivsLHS[k];
        noalias(rRHS_Contribution) -= mBossak.C6 * mFirstDerivsResponseGradient[k];
    }

    /// Accumulate the second-time-derivative (mass) terms, weighted by
    /// (1 - alpha) per the Bossak scheme and scaled by C7 = 1/(beta*dt^2).
    void CalculateSecondDerivativeContributions(Element& rCurrentElement,
                                                LocalSystemMatrixType& rLHS_Contribution,
                                                LocalSystemVectorType& rRHS_Contribution,
                                                const ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        auto& r_response_function = *(this->mpResponseFunction);
        rCurrentElement.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], rCurrentProcessInfo);
        mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
        r_response_function.CalculateSecondDerivativesGradient(
            rCurrentElement, mSecondDerivsLHS[k],
            mSecondDerivsResponseGradient[k], rCurrentProcessInfo);
        noalias(rLHS_Contribution) += mBossak.C7 * mSecondDerivsLHS[k];
        noalias(rRHS_Contribution) -= mBossak.C7 * mSecondDerivsResponseGradient[k];
    }

    /// Add old-time-step adjoint terms to the RHS.  Nodal adjoint values are
    /// shared between neighbouring elements, so each contribution is weighted
    /// by 1/NUMBER_OF_NEIGHBOUR_ELEMENTS to avoid double counting.
    void CalculatePreviousTimeStepContributions(Element& rCurrentElement,
                                                LocalSystemMatrixType& rLHS_Contribution,
                                                LocalSystemVectorType& rRHS_Contribution,
                                                const ProcessInfo& rCurrentProcessInfo)
    {
        const auto& r_geometry = rCurrentElement.GetGeometry();
        const auto k = OpenMPUtils::ThisThread();
        auto& r_extensions = *rCurrentElement.GetValue(ADJOINT_EXTENSIONS);
        unsigned local_index = 0;
        for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
        {
            auto& r_node = r_geometry[i_node];
            // Step index 1 = previous (old) time step values.
            r_extensions.GetFirstDerivativesVector(i_node, mAdjointIndirectVector2[k], 1);
            r_extensions.GetSecondDerivativesVector(i_node, mAdjointIndirectVector3[k], 1);
            r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);
            const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
            for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
            {
                rRHS_Contribution[local_index] +=
                    weight * (mBossak.C7 * mAuxAdjointIndirectVector1[k][d] +
                              mBossak.C4 * mAdjointIndirectVector2[k][d] +
                              mBossak.C5 * mAdjointIndirectVector3[k][d]);
                ++local_index;
            }
        }
    }

    /// Subtract LHS * (current adjoint values) from the RHS so the linear
    /// system solves for the adjoint increment rather than the total value.
    void CalculateResidualLocalContributions(Element& rCurrentElement,
                                             LocalSystemMatrixType& rLHS_Contribution,
                                             LocalSystemVectorType& rRHS_Contribution,
                                             const ProcessInfo& rCurrentProcessInfo)
    {
        int k = OpenMPUtils::ThisThread();
        auto& r_residual_adjoint = mAdjointValuesVector[k];
        rCurrentElement.GetValuesVector(r_residual_adjoint);
        noalias(rRHS_Contribution) -= prod(rLHS_Contribution, r_residual_adjoint);
    }

    /// Set NUMBER_OF_NEIGHBOUR_ELEMENTS to zero on every node.
    void InitializeNodeNeighbourCount(ModelPart::NodesContainerType& rNodes)
    {
        // This loop should not be omp parallel
        // The operation is not threadsafe if the value is uninitialized
        for (auto& r_node : rNodes)
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
    }

    /// Count, per node, how many elements share it (assembled across MPI
    /// partitions at the end).  Used as the 1/weight in nodal assembly.
    void CalculateNodeNeighbourCount(ModelPart& rModelPart)
    {
        // Calculate number of neighbour elements for each node.
        const int num_nodes = rModelPart.NumberOfNodes();
        #pragma omp parallel for
        for (int i = 0; i < num_nodes; ++i)
        {
            Node<3>& r_node = *(rModelPart.Nodes().begin() + i);
            r_node.SetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS, 0.0);
        }

        const int num_elements = rModelPart.NumberOfElements();
        #pragma omp parallel for
        for (int i = 0; i < num_elements; ++i)
        {
            Element& r_element = *(rModelPart.Elements().begin() + i);
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned j = 0; j < r_geometry.PointsNumber(); ++j)
            {
                double& r_num_neighbour =
                    r_geometry[j].GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
                // atomic: several elements may touch the same node concurrently
                #pragma omp atomic
                r_num_neighbour += 1.0;
            }
        }

        rModelPart.GetCommunicator().AssembleNonHistoricalData(NUMBER_OF_NEIGHBOUR_ELEMENTS);
    }

    /// Recompute the nodal adjoint variables of the time integration
    /// (lambda2 = first-derivative adjoints, lambda3 = second-derivative
    /// adjoints) from the freshly solved residual adjoints and the old-step
    /// values.  Node locks guard concurrent nodal accumulation.
    void UpdateTimeSchemeAdjoints(ModelPart& rModelPart)
    {
        KRATOS_TRY;
        auto lambda2_vars = GatherVariables(
            rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
                                      std::vector<const VariableData*>& rVec) {
                rExtensions.GetFirstDerivativesVariables(rVec);
            });
        auto lambda3_vars = GatherVariables(
            rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
                                      std::vector<const VariableData*>& rVec) {
                return rExtensions.GetSecondDerivativesVariables(rVec);
            });
        SetToZero_AdjointVars(lambda2_vars, rModelPart.Nodes());
        SetToZero_AdjointVars(lambda3_vars, rModelPart.Nodes());

        const int number_of_elements = rModelPart.NumberOfElements();
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        Vector adjoint2_aux, adjoint3_aux;
        std::vector<IndirectScalar<double>> adjoint2_old, adjoint3_old;
        #pragma omp parallel for private(adjoint2_aux, adjoint3_aux, adjoint2_old, adjoint3_old)
        for (int i = 0; i < number_of_elements; ++i)
        {
            Element& r_element = *(rModelPart.ElementsBegin() + i);
            const int k = OpenMPUtils::ThisThread();

            r_element.GetValuesVector(mAdjointValuesVector[k]);
            this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());

            r_element.CalculateFirstDerivativesLHS(mFirstDerivsLHS[k], r_process_info);
            this->mpResponseFunction->CalculateFirstDerivativesGradient(
                r_element, mFirstDerivsLHS[k],
                mFirstDerivsResponseGradient[k], r_process_info);

            r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
            mSecondDerivsLHS[k] *= (1.0 - mBossak.Alpha);
            this->mpResponseFunction->CalculateSecondDerivativesGradient(
                r_element, mSecondDerivsLHS[k],
                mSecondDerivsResponseGradient[k], r_process_info);

            if (adjoint2_aux.size() != mFirstDerivsResponseGradient[k].size())
                adjoint2_aux.resize(mFirstDerivsResponseGradient[k].size(), false);
            noalias(adjoint2_aux) = -mFirstDerivsResponseGradient[k] -
                                    prod(mFirstDerivsLHS[k], mAdjointValuesVector[k]);
            if (adjoint3_aux.size() != mSecondDerivsResponseGradient[k].size())
                adjoint3_aux.resize(mSecondDerivsResponseGradient[k].size(), false);
            noalias(adjoint3_aux) = -mSecondDerivsResponseGradient[k] -
                                    prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]);
            auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
            // Assemble the contributions to the corresponding nodal unknowns.
            unsigned local_index = 0;
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
            {
                r_extensions.GetFirstDerivativesVector(
                    i_node, mAdjointIndirectVector2[k], 0);
                r_extensions.GetSecondDerivativesVector(
                    i_node, mAdjointIndirectVector3[k], 0);
                r_extensions.GetFirstDerivativesVector(i_node, adjoint2_old, 1);
                r_extensions.GetSecondDerivativesVector(i_node, adjoint3_old, 1);
                r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 1);

                Node<3>& r_node = r_geometry[i_node];
                const double weight = 1.0 / r_node.GetValue(NUMBER_OF_NEIGHBOUR_ELEMENTS);
                // Node lock: other elements may accumulate into the same node
                // on other threads.
                r_node.SetLock();
                for (unsigned d = 0; d < mAdjointIndirectVector2[k].size(); ++d)
                {
                    mAdjointIndirectVector2[k][d] += adjoint2_aux[local_index];
                    mAdjointIndirectVector2[k][d] += mBossak.C0 * weight * adjoint2_old[d];
                    mAdjointIndirectVector2[k][d] += mBossak.C1 * weight * adjoint3_old[d];
                    mAdjointIndirectVector3[k][d] += adjoint3_aux[local_index];
                    mAdjointIndirectVector3[k][d] += mBossak.C2 * weight * adjoint2_old[d];
                    mAdjointIndirectVector3[k][d] += mBossak.C3 * weight * adjoint3_old[d];
                    mAdjointIndirectVector3[k][d] +=
                        weight * mAuxAdjointIndirectVector1[k][d];
                    ++local_index;
                }
                r_node.UnSetLock();
            }
        }

        // Finalize global assembly
        Assemble_AdjointVars(lambda2_vars, rModelPart.GetCommunicator());
        Assemble_AdjointVars(lambda3_vars, rModelPart.GetCommunicator());
        KRATOS_CATCH("");
    }

    /// Recompute the auxiliary adjoint variable (the alpha-weighted part of
    /// the second-derivative contribution carried over to the next step).
    void UpdateAuxiliaryVariable(ModelPart& rModelPart)
    {
        KRATOS_TRY;
        auto aux_vars = GatherVariables(
            rModelPart.Elements(), [](const AdjointExtensions& rExtensions,
                                      std::vector<const VariableData*>& rOut) {
                return rExtensions.GetAuxiliaryVariables(rOut);
            });

        SetToZero_AdjointVars(aux_vars, rModelPart.Nodes());

        // Loop over elements to assemble the remaining terms
        const int number_of_elements = rModelPart.NumberOfElements();
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
        Vector aux_adjoint_vector;
        #pragma omp parallel for private(aux_adjoint_vector)
        for (int i = 0; i < number_of_elements; ++i)
        {
            Element& r_element = *(rModelPart.ElementsBegin() + i);
            const int k = OpenMPUtils::ThisThread();

            r_element.GetValuesVector(mAdjointValuesVector[k]);
            this->CheckAndResizeThreadStorage(mAdjointValuesVector[k].size());

            // Note: here the mass terms are scaled by alpha (not 1 - alpha).
            r_element.CalculateSecondDerivativesLHS(mSecondDerivsLHS[k], r_process_info);
            mSecondDerivsLHS[k] *= mBossak.Alpha;
            this->mpResponseFunction->CalculateSecondDerivativesGradient(
                r_element, mSecondDerivsLHS[k],
                mSecondDerivsResponseGradient[k], r_process_info);

            if (aux_adjoint_vector.size() != mSecondDerivsLHS[k].size1())
                aux_adjoint_vector.resize(mSecondDerivsLHS[k].size1(), false);
            noalias(aux_adjoint_vector) =
                prod(mSecondDerivsLHS[k], mAdjointValuesVector[k]) +
                mSecondDerivsResponseGradient[k];
            auto& r_extensions = *r_element.GetValue(ADJOINT_EXTENSIONS);
            // Assemble the contributions to the corresponding nodal unknowns.
            unsigned local_index = 0;
            Geometry<Node<3>>& r_geometry = r_element.GetGeometry();
            for (unsigned i_node = 0; i_node < r_geometry.PointsNumber(); ++i_node)
            {
                Node<3>& r_node = r_geometry[i_node];
                r_extensions.GetAuxiliaryVector(i_node, mAuxAdjointIndirectVector1[k], 0);
                r_node.SetLock();
                for (unsigned d = 0; d < mAuxAdjointIndirectVector1[k].size(); ++d)
                {
                    mAuxAdjointIndirectVector1[k][d] -= aux_adjoint_vector[local_index];
                    ++local_index;
                }
                r_node.UnSetLock();
            }
        }

        // Finalize global assembly
        Assemble_AdjointVars(aux_vars, rModelPart.GetCommunicator());
        KRATOS_CATCH("");
    }

    /// Resize the current thread's scratch matrices/vectors to SystemSize
    /// (no-op when already the right size; contents are not preserved).
    void CheckAndResizeThreadStorage(unsigned SystemSize)
    {
        const int k = OpenMPUtils::ThisThread();

        if (mLeftHandSide[k].size1() != SystemSize || mLeftHandSide[k].size2() != SystemSize)
        {
            mLeftHandSide[k].resize(SystemSize, SystemSize, false);
        }

        if (mFirstDerivsLHS[k].size1() != SystemSize || mFirstDerivsLHS[k].size2() != SystemSize)
        {
            mFirstDerivsLHS[k].resize(SystemSize, SystemSize, false);
        }

        if (mSecondDerivsLHS[k].size1() != SystemSize || mSecondDerivsLHS[k].size2() != SystemSize)
        {
            mSecondDerivsLHS[k].resize(SystemSize, SystemSize, false);
        }

        if (mResponseGradient[k].size() != SystemSize)
        {
            mResponseGradient[k].resize(SystemSize, false);
        }

        if (mFirstDerivsResponseGradient[k].size() != SystemSize)
        {
            mFirstDerivsResponseGradient[k].resize(SystemSize, false);
        }

        if (mSecondDerivsResponseGradient[k].size() != SystemSize)
        {
            mSecondDerivsResponseGradient[k].resize(SystemSize, false);
        }
    }

    /// Derive the Newmark/Bossak coefficients C0..C7 from alpha and the step
    /// size (Beta and Gamma follow the standard Bossak relations).
    static BossakConstants CalculateBossakConstants(double Alpha, double DeltaTime)
    {
        BossakConstants bc;
        bc.Alpha = Alpha;
        bc.Beta = 0.25 * (1.0 - bc.Alpha) * (1.0 - bc.Alpha);
        bc.Gamma = 0.5 - bc.Alpha;
        bc.C0 = 1.0 - bc.Gamma / bc.Beta;
        bc.C1 = -1.0 / (bc.Beta * DeltaTime);
        bc.C2 = (1.0 - 0.5 * bc.Gamma / bc.Beta) * DeltaTime;
        bc.C3 = (1.0 - 0.5 / bc.Beta);
        bc.C4 = (bc.Beta - bc.Gamma * (bc.Gamma + 0.5)) / (DeltaTime * bc.Beta * bc.Beta);
        bc.C5 = -1.0 * (bc.Gamma + 0.5) / (DeltaTime * DeltaTime * bc.Beta * bc.Beta);
        bc.C6 = bc.Gamma / (bc.Beta * DeltaTime);
        bc.C7 = 1.0 / (DeltaTime * DeltaTime * bc.Beta);
        return bc;
    }

    /// Positive time step size for the (backwards-in-time) adjoint solve.
    static double GetTimeStep(const ProcessInfo& rCurrentProcessInfo)
    {
        const ProcessInfo& r_last_process_info =
            rCurrentProcessInfo.GetPreviousSolutionStepInfo(1);

        // Note: solution is backwards in time, but we still want a positive
        // time step
        // (it is the time step in the "forward" Bossak scheme).
        double time_step =
            r_last_process_info.GetValue(TIME) - rCurrentProcessInfo.GetValue(TIME);
        KRATOS_ERROR_IF(time_step <= 0.0)
            << "Backwards in time solution is not decreasing time from last "
               "step."
            << std::endl;
        return time_step;
    }

    // Hash/equality functors so VariableData pointers are deduplicated by
    // variable identity (key / value) rather than by pointer address.
    struct Hash
    {
        std::size_t operator()(const VariableData* const& p) const
        {
            return p->Key();
        }
    };

    struct Pred
    {
        bool operator()(const VariableData* const l, const VariableData* const r) const
        {
            return *l == *r;
        }
    };

    // Gathers variables needed for assembly.
    /// Collects the union of the variables reported by GetLocalVars over all
    /// elements' AdjointExtensions (deduplicated via per-thread sets).
    static std::vector<const VariableData*> GatherVariables(
        const ModelPart::ElementsContainerType& rElements,
        std::function<void(const AdjointExtensions&, std::vector<const VariableData*>&)> GetLocalVars)
    {
        KRATOS_TRY;
        const int num_threads = OpenMPUtils::GetNumThreads();
        std::vector<const VariableData*> local_vars;
        std::vector<std::unordered_set<const VariableData*, Hash, Pred>> thread_vars(num_threads);
        #pragma omp parallel for private(local_vars)
        for (int i = 0; i < static_cast<int>(rElements.size()); ++i)
        {
            auto& r_element = *(rElements.begin() + i);
            GetLocalVars(*r_element.GetValue(ADJOINT_EXTENSIONS), local_vars);
            const int k = OpenMPUtils::ThisThread();
            thread_vars[k].insert(local_vars.begin(), local_vars.end());
        }
        std::unordered_set<const VariableData*, Hash, Pred> all_vars;
        for (int i = 0; i < num_threads; ++i)
        {
            all_vars.insert(thread_vars[i].begin(), thread_vars[i].end());
        }
        return std::vector<const VariableData*>{all_vars.begin(), all_vars.end()};
        KRATOS_CATCH("");
    }

    /// Zero the historical nodal values of each listed variable (double or
    /// array_1d<double,3>; any other type is an error).
    static void SetToZero_AdjointVars(const std::vector<const VariableData*>& rVariables,
                                      ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY;
        for (auto p_variable_data : rVariables)
        {
            if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
                    p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<array_1d<double, 3>>>::Get(
                        p_variable_data->Name());
                VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
            }
            else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<double>>::Get(p_variable_data->Name());
                VariableUtils().SetHistoricalVariableToZero(r_variable, rNodes);
            }
            else
            {
                KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
                             << "\" not found!\n";
            }
        }
        KRATOS_CATCH("");
    }

    /// Sum partition-local contributions of each listed variable across MPI
    /// ranks (double or array_1d<double,3>; any other type is an error).
    static void Assemble_AdjointVars(const std::vector<const VariableData*>& rVariables,
                                     Communicator& rComm)
    {
        KRATOS_TRY;
        for (auto p_variable_data : rVariables)
        {
            if (KratosComponents<Variable<array_1d<double, 3>>>::Has(
                    p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<array_1d<double, 3>>>::Get(
                        p_variable_data->Name());
                rComm.AssembleCurrentData(r_variable);
            }
            else if (KratosComponents<Variable<double>>::Has(p_variable_data->Name()))
            {
                const auto& r_variable =
                    KratosComponents<Variable<double>>::Get(p_variable_data->Name());
                rComm.AssembleCurrentData(r_variable);
            }
            else
            {
                KRATOS_ERROR << "Variable \"" << p_variable_data->Name()
                             << "\" not found!\n";
            }
        }
        KRATOS_CATCH("");
    }

    ///@}
    ///@name Private Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    ///@}

}; /* Class ResidualBasedAdjointBossakScheme */

///@}

///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_RESIDUAL_BASED_ADJOINT_BOSSAK_SCHEME_H_INCLUDED defined */
// ===== kdtree.h =====
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef VECTORSEARCH_THIRD_PARTY_SPTAG_KDTREE_H_ #define VECTORSEARCH_THIRD_PARTY_SPTAG_KDTREE_H_ #include <iostream> #include <string> #include <vector> #include "vector_index.h" #include "common_utils.h" #include "query_result_set.h" #include "workspace.h" #include <xmmintrin.h> #pragma warning(disable : 4996) // 'fopen': This function or variable may be // unsafe. Consider using fopen_s instead. To // disable deprecation, use // _CRT_SECURE_NO_WARNINGS. See online help for // details. namespace vsearch { namespace COMMON { // node type for storing KDT struct KDTNode { int left; int right; short split_dim; float split_value; }; class KDTree { public: KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000) {} KDTree(KDTree& other) : m_iTreeNumber(other.m_iTreeNumber), m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit), m_iSamples(other.m_iSamples) {} ~KDTree() {} inline const KDTNode& operator[](int index) const { return m_pTreeRoots[index]; } inline KDTNode& operator[](int index) { return m_pTreeRoots[index]; } inline int size() const { return (int)m_pTreeRoots.size(); } template <typename T> void BuildTrees(VectorIndex* p_index, std::vector<int>* indices = nullptr) { std::vector<int> localindices; if (indices == nullptr) { localindices.resize(p_index->GetNumSamples()); for (int i = 0; i < p_index->GetNumSamples(); i++) localindices[i] = i; } else { localindices.assign(indices->begin(), indices->end()); } m_pTreeRoots.resize(m_iTreeNumber * localindices.size()); m_pTreeStart.resize(m_iTreeNumber, 0); #pragma omp parallel for for (int i = 0; i < m_iTreeNumber; i++) { Sleep(i * 100); std::srand(clock()); std::vector<int> pindices(localindices.begin(), localindices.end()); std::random_shuffle(pindices.begin(), pindices.end()); m_pTreeStart[i] = i * (int)pindices.size(); std::cout << "Start to build KDTree " << i + 1 << std::endl; int iTreeSize 
= m_pTreeStart[i]; DivideTree<T>(p_index, pindices, 0, (int)pindices.size() - 1, m_pTreeStart[i], iTreeSize); std::cout << i + 1 << " KDTree built, " << iTreeSize - m_pTreeStart[i] << " " << pindices.size() << std::endl; } } bool SaveTrees(std::string sTreeFileName) const { std::cout << "Save KDT to " << sTreeFileName << std::endl; FILE* fp = fopen(sTreeFileName.c_str(), "wb"); if (fp == NULL) return false; fwrite(&m_iTreeNumber, sizeof(int), 1, fp); fwrite(m_pTreeStart.data(), sizeof(int), m_iTreeNumber, fp); int treeNodeSize = (int)m_pTreeRoots.size(); fwrite(&treeNodeSize, sizeof(int), 1, fp); fwrite(m_pTreeRoots.data(), sizeof(KDTNode), treeNodeSize, fp); fclose(fp); std::cout << "Save KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl; return true; } bool LoadTrees(char* pKDTMemFile) { m_iTreeNumber = *((int*)pKDTMemFile); pKDTMemFile += sizeof(int); m_pTreeStart.resize(m_iTreeNumber); memcpy(m_pTreeStart.data(), pKDTMemFile, sizeof(int) * m_iTreeNumber); pKDTMemFile += sizeof(int) * m_iTreeNumber; int treeNodeSize = *((int*)pKDTMemFile); pKDTMemFile += sizeof(int); m_pTreeRoots.resize(treeNodeSize); memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize); return true; } bool LoadTrees(std::string sTreeFileName) { std::cout << "Load KDT From " << sTreeFileName << std::endl; FILE* fp = fopen(sTreeFileName.c_str(), "rb"); if (fp == NULL) return false; fread(&m_iTreeNumber, sizeof(int), 1, fp); m_pTreeStart.resize(m_iTreeNumber); fread(m_pTreeStart.data(), sizeof(int), m_iTreeNumber, fp); int treeNodeSize; fread(&treeNodeSize, sizeof(int), 1, fp); m_pTreeRoots.resize(treeNodeSize); fread(m_pTreeRoots.data(), sizeof(KDTNode), treeNodeSize, fp); fclose(fp); std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" 
<< std::endl; return true; } template <typename T> void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T>& p_query, COMMON::WorkSpace& p_space, const int p_limits) const { for (char i = 0; i < m_iTreeNumber; i++) { KDTSearch(p_index, p_query, p_space, m_pTreeStart[i], true, 0); } while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits) { auto& tcell = p_space.m_SPTQueue.pop(); if (p_query.worstDist() < tcell.distance) break; KDTSearch(p_index, p_query, p_space, tcell.node, true, tcell.distance); } } template <typename T> void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T>& p_query, COMMON::WorkSpace& p_space, const int p_limits) const { while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits) { auto& tcell = p_space.m_SPTQueue.pop(); KDTSearch(p_index, p_query, p_space, tcell.node, false, tcell.distance); } } private: template <typename T> void KDTSearch(const VectorIndex* p_index, const COMMON::QueryResultSet<T>& p_query, COMMON::WorkSpace& p_space, const int node, const bool isInit, const float distBound) const { if (node < 0) { int index = -node - 1; if (index >= p_index->GetNumSamples()) return; #ifdef PREFETCH const char* data = (const char*)(p_index->GetSample(index)); _mm_prefetch(data, _MM_HINT_T0); _mm_prefetch(data + 64, _MM_HINT_T0); #endif if (p_space.CheckAndSet(index)) return; ++p_space.m_iNumberOfTreeCheckedLeaves; ++p_space.m_iNumberOfCheckedLeaves; p_space.m_NGQueue.insert(COMMON::HeapCell( index, p_index->ComputeDistance((const void*)p_query.GetTarget(), (const void*)data))); return; } auto& tnode = m_pTreeRoots[node]; float diff = (p_query.GetTarget())[tnode.split_dim] - tnode.split_value; float distanceBound = distBound + diff * diff; int otherChild, bestChild; if (diff < 0) { bestChild = tnode.left; otherChild = tnode.right; } else { otherChild = tnode.left; bestChild = tnode.right; } if (!isInit || distanceBound < p_query.worstDist()) { 
p_space.m_SPTQueue.insert(COMMON::HeapCell(otherChild, distanceBound)); } KDTSearch(p_index, p_query, p_space, bestChild, isInit, distBound); } template <typename T> void DivideTree(VectorIndex* p_index, std::vector<int>& indices, int first, int last, int index, int& iTreeSize) { ChooseDivision<T>(p_index, m_pTreeRoots[index], indices, first, last); int i = Subdivide<T>(p_index, m_pTreeRoots[index], indices, first, last); if (i - 1 <= first) { m_pTreeRoots[index].left = -indices[first] - 1; } else { iTreeSize++; m_pTreeRoots[index].left = iTreeSize; DivideTree<T>(p_index, indices, first, i - 1, iTreeSize, iTreeSize); } if (last == i) { m_pTreeRoots[index].right = -indices[last] - 1; } else { iTreeSize++; m_pTreeRoots[index].right = iTreeSize; DivideTree<T>(p_index, indices, i, last, iTreeSize, iTreeSize); } } template <typename T> void ChooseDivision(VectorIndex* p_index, KDTNode& node, const std::vector<int>& indices, const int first, const int last) { std::vector<float> meanValues(p_index->GetFeatureDim(), 0); std::vector<float> varianceValues(p_index->GetFeatureDim(), 0); int end = min(first + m_iSamples, last); int count = end - first + 1; // calculate the mean of each dimension for (int j = first; j <= end; j++) { const T* v = (const T*)p_index->GetSample(indices[j]); for (int k = 0; k < p_index->GetFeatureDim(); k++) { meanValues[k] += v[k]; } } for (int k = 0; k < p_index->GetFeatureDim(); k++) { meanValues[k] /= count; } // calculate the variance of each dimension for (int j = first; j <= end; j++) { const T* v = (const T*)p_index->GetSample(indices[j]); for (int k = 0; k < p_index->GetFeatureDim(); k++) { float dist = v[k] - meanValues[k]; varianceValues[k] += dist * dist; } } // choose the split dimension as one of the dimension inside TOP_DIM maximum // variance node.split_dim = SelectDivisionDimension(varianceValues); // determine the threshold node.split_value = meanValues[node.split_dim]; } int SelectDivisionDimension(const std::vector<float>& 
varianceValues) const { // Record the top maximum variances std::vector<int> topind(m_numTopDimensionKDTSplit); int num = 0; // order the variances for (int i = 0; i < varianceValues.size(); i++) { if (num < m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]]) { if (num < m_numTopDimensionKDTSplit) { topind[num++] = i; } else { topind[num - 1] = i; } int j = num - 1; // order the TOP_DIM variances while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]]) { std::swap(topind[j], topind[j - 1]); j--; } } } // randomly choose a dimension from TOP_DIM return topind[Utils::rand_int(num)]; } template <typename T> int Subdivide(VectorIndex* p_index, const KDTNode& node, std::vector<int>& indices, const int first, const int last) const { int i = first; int j = last; // decide which child one point belongs while (i <= j) { int ind = indices[i]; const T* v = (const T*)p_index->GetSample(ind); float val = v[node.split_dim]; if (val < node.split_value) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } return i; } private: std::vector<int> m_pTreeStart; std::vector<KDTNode> m_pTreeRoots; public: int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples; }; } // COMMON } // vsearch #endif //VECTORSEARCH_THIRD_PARTY_SPTAG_KDTREE_H_
main.c
#include "./typos_v.h"
#include <omp.h>

const int BODY_COUNT = N4;

/* Euclidean length (L2 norm) of a 3-component vector. */
double magnitude(data3D data){
    double squareOfLength = 0.0;
    squareOfLength += data.x * data.x;
    squareOfLength += data.y * data.y;
    squareOfLength += data.z * data.z;
    return sqrt(squareOfLength);
}

/* Scales *data in place to unit length.
 * BUG FIX: the original took its argument by value, so the normalized
 * result was silently discarded by every caller.  It now mutates through
 * a pointer.  A zero-length vector is left untouched to avoid dividing
 * by zero (which previously produced NaNs for coincident bodies). */
void normalize(data3D *data){
    double length = magnitude(*data);
    if (length == 0.0)
        return;
    data->x /= length;
    data->y /= length;
    data->z /= length;
}

/* Negates *data in place (same pass-by-value fix as normalize). */
void invert(data3D *data) {
    data->x *= -1.0;
    data->y *= -1.0;
    data->z *= -1.0;
}

/* Writes into *resultVector the unit vector pointing from fromVector
 * toward toVector.
 * BUG FIX: resultVector was previously passed by value, so the computed
 * direction never reached the caller and every force vector stayed
 * {0,0,0} -- the simulation did no work. */
void direction(data3D fromVector, data3D toVector, data3D *resultVector) {
    resultVector->x = toVector.x - fromVector.x;
    resultVector->y = toVector.y - fromVector.y;
    resultVector->z = toVector.z - fromVector.z;
    normalize(resultVector);
}

/* Magnitude of the Newtonian gravitational force between two point
 * masses.  Returns 0 for coincident positions (also covers the i == j
 * self-interaction case) instead of dividing by zero. */
double forceNewtonianGravity3D(double onMass, double becauseOfMass,
                               data3D onPosition, data3D becauseOfPosition) {
    double deltaX = becauseOfPosition.x - onPosition.x;
    double deltaY = becauseOfPosition.y - onPosition.y;
    double deltaZ = becauseOfPosition.z - onPosition.z;
    double distance = sqrt(deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ);
    if (distance == 0) {
        return 0;
    }
    return G * (onMass * becauseOfMass) / (distance * distance);
}

/* a = F / m; returns 0 when there is no force. */
double computeAccel(double mass, double force) {
    if (force == 0) {
        return 0;
    }
    return force / mass;
}

/* Explicit Euler step: v' = v + a * dt. */
double computeVelo(double current, double previous, float deltaT) {
    return previous + (current * deltaT);
}

/* Explicit Euler step: x' = x + v * dt. */
double computePos(double current, double previous, float deltaT) {
    return previous + (current * deltaT);
}

/* Component-wise acceleration vector from a force vector and a mass. */
data3D computeAccel3D(double mass, data3D force) {
    data3D anAccelVector = {0, 0, 0};
    anAccelVector.x = computeAccel(mass, force.x);
    anAccelVector.y = computeAccel(mass, force.y);
    anAccelVector.z = computeAccel(mass, force.z);
    return anAccelVector;
}

/* Component-wise Euler velocity update. */
data3D computeVelo3D(data3D accel, data3D prevVelo, float deltaT) {
    data3D adoubleVector = {0, 0, 0};
    adoubleVector.x = computeVelo(accel.x, prevVelo.x, deltaT);
    adoubleVector.y = computeVelo(accel.y, prevVelo.y, deltaT);
    adoubleVector.z = computeVelo(accel.z, prevVelo.z, deltaT);
    return adoubleVector;
}

/* Component-wise Euler position update. */
data3D computePos3D(data3D velo, data3D prevPos, float deltaT) {
    data3D anPositionVector = {0, 0, 0};
    anPositionVector.x = computePos(velo.x, prevPos.x, deltaT);
    anPositionVector.y = computePos(velo.y, prevPos.y, deltaT);
    anPositionVector.z = computePos(velo.z, prevPos.z, deltaT);
    return anPositionVector;
}

/* Accumulates the gravitational force on body bodyIndex from every body
 * (the self term contributes zero) and stores the resulting acceleration
 * in acceleration[bodyIndex]. */
void updateAcceleration(int bodyIndex){
    data3D netForce = { 0, 0, 0 };
    for (int i = 0; i < BODY_COUNT; i++) {
        data3D vectorForceToOther = {0, 0, 0};
        double scalarForceBetween = forceNewtonianGravity3D(
            mass[bodyIndex], mass[i], position[bodyIndex], position[i]);
        direction(position[bodyIndex], position[i], &vectorForceToOther);
        vectorForceToOther.x *= scalarForceBetween;
        vectorForceToOther.y *= scalarForceBetween;
        vectorForceToOther.z *= scalarForceBetween;
        netForce.x += vectorForceToOther.x;
        netForce.y += vectorForceToOther.y;
        netForce.z += vectorForceToOther.z;
    }
    acceleration[bodyIndex] = computeAccel3D(mass[bodyIndex], netForce);
}

/* Advances speed[bodyIndex] by one Euler step. */
void updateVelocity(int bodyIndex, float deltaT){
    speed[bodyIndex] = computeVelo3D(acceleration[bodyIndex], speed[bodyIndex], deltaT);
}

/* Advances position[bodyIndex] by one Euler step. */
void updatePosition(int bodyIndex, float deltaT){
    position[bodyIndex] = computePos3D(speed[bodyIndex], position[bodyIndex], deltaT);
}

/* Prints mass, position, velocity and acceleration of every body. */
void displayAll(){
    int index;
    for (index = 0; index < BODY_COUNT; index++) {
        printf("\nBody %d:\nMassa: %f\nPosiçao(x ,y, z): %f, %f, %f\nVelocidade(x, y, z): %f, %f, %f\nAceleracao(x ,y, z): %f, %f, %f\n\n",
               index + 1, mass[index],
               position[index].x, position[index].y, position[index].z,
               speed[index].x, speed[index].y, speed[index].z,
               acceleration[index].x, acceleration[index].y, acceleration[index].z);
    }
}

/* One simulation step for all bodies.
 * BUG FIX: the original computed acceleration, velocity AND position for
 * body i inside a single parallel loop, so one thread wrote position[i]
 * while other threads were still reading all positions -- a data race
 * with nondeterministic results.  The step is now two phases: all
 * accelerations are computed from a consistent snapshot of positions,
 * then all velocities/positions are advanced. */
void updatePhysics(float deltaT){
    int i;
#pragma omp parallel for num_threads(8)
    for (i = 0; i < BODY_COUNT; i++) {
        updateAcceleration(i);
    }
#pragma omp parallel for num_threads(8)
    for (i = 0; i < BODY_COUNT; i++) {
        updateVelocity(i, deltaT);
        updatePosition(i, deltaT);
    }
}

/* Runs 10000 simulation steps.
 * NOTE(review): deltaT grows linearly (i * 100) each step, which is a
 * very unusual integration scheme -- confirm this is intentional. */
void nBodyStart(){
    int j = 0;
    int i = 0;
    for (j = 0; j < 1; ++j) {
        for (i = 0; i < 10000; ++i) {
            updatePhysics(i * 100);
        }
    }
    //displayAll();
}

int main(){
    nBodyStart();
    return 0;
}
task_in_joinbarrier.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // XFAIL: gcc-4 // gcc-4 manages frame pointers for parallel regions differently than other APIs. the parallel region's enter_frame.ptr // matches the implicit task's exit_frame.ptr. for that reason, this test will fail. #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> int main() { int condition=0; omp_set_nested(0); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); #pragma omp master { print_ids(0); #pragma omp task shared(condition) { OMPT_SIGNAL(condition); print_frame(1); print_ids(0); print_ids(1); print_ids(2); } OMPT_WAIT(condition,1); print_ids(0); } print_ids(0); } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released' // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]] // 
nested parallel masters // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // <- ompt_event_task_create would be expected here // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=0x{{[0-f]+}}, new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[TASK_FUNCTION:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // implicit barrier parallel // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], 
task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // implicit barrier parallel // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_end: task_id=[[TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]] return 0; }
deconvolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.row(i); float* outptr0 = outptr; float* outptr1 = outptr + outw; float* outptr2 = outptr + outw*2; int j = 0; #if __ARM_NEON for (; j+3 < w; j+=4) { float32x4_t _v = vld1q_f32(r0); #if 0 // bad compiler generate slow instructions :( // 0 float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // ext float32x4_t _zero_out01 = vdupq_n_f32(0.f); _zero_out01 = vextq_f32(_zero_out01, _out01, 3); _out00 = vaddq_f32(_out00, _zero_out01); // float32x2_t _out00low = vget_low_f32(_out00); float32x2_t _out00high = vget_high_f32(_out00); _out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0); _out00 = vcombine_f32(_out00low, _out00high); vst1q_f32(outptr0 + 0, _out00); // float32x2_t _out02high = vld1_f32(outptr0 + 4); float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1); _out02high = vadd_f32(_out02high, _out01_zero); _out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0); vst1_f32(outptr0 + 4, _out02high); // 1 float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // ext float32x4_t _zero_out11 = vdupq_n_f32(0.f); _zero_out11 = vextq_f32(_zero_out11, _out11, 3); _out10 = vaddq_f32(_out10, _zero_out11); // float32x2_t _out10low = vget_low_f32(_out10); float32x2_t _out10high = vget_high_f32(_out10); _out10high = 
vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0); _out10 = vcombine_f32(_out10low, _out10high); vst1q_f32(outptr1 + 0, _out10); // float32x2_t _out12high = vld1_f32(outptr1 + 4); float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1); _out12high = vadd_f32(_out12high, _out11_zero); _out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), vget_high_f32(_k1), 0); vst1_f32(outptr1 + 4, _out12high); // 2 float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // ext float32x4_t _zero_out21 = vdupq_n_f32(0.f); _zero_out21 = vextq_f32(_zero_out21, _out21, 3); _out20 = vaddq_f32(_out20, _zero_out21); // float32x2_t _out20low = vget_low_f32(_out20); float32x2_t _out20high = vget_high_f32(_out20); _out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0); _out20 = vcombine_f32(_out20low, _out20high); vst1q_f32(outptr2 + 0, _out20); // float32x2_t _out22high = vld1_f32(outptr2 + 4); float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1); _out22high = vadd_f32(_out22high, _out21_zero); _out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0); vst1_f32(outptr2 + 4, _out22high); #else // float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); vst1q_f32(outptr0 + 0, _out00); float32x4_t _out01 = vld1q_f32(outptr0 + 1); _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1); vst1q_f32(outptr0 + 1, _out01); float32x4_t _out02 = vld1q_f32(outptr0 + 2); _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0); vst1q_f32(outptr0 + 2, _out02); // float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); vst1q_f32(outptr1 + 0, _out10); float32x4_t _out11 = vld1q_f32(outptr1 + 1); _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1); 
vst1q_f32(outptr1 + 1, _out11); float32x4_t _out12 = vld1q_f32(outptr1 + 2); _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0); vst1q_f32(outptr1 + 2, _out12); // float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); vst1q_f32(outptr2 + 0, _out20); float32x4_t _out21 = vld1q_f32(outptr2 + 1); _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1); vst1q_f32(outptr2 + 1, _out21); float32x4_t _out22 = vld1q_f32(outptr2 + 2); _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0); vst1q_f32(outptr2 + 2, _out22); #endif r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; r0++; outptr0++; outptr1++; outptr2++; } } } } } static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.row(i*2); float* outptr0 = outptr; float* outptr1 = outptr0 + outw; float* outptr2 = outptr1 + outw; int j = 0; #if __ARM_NEON for (; j+3 < w; j+=4) { float32x4_t _v = vld1q_f32(r0); // out row 0 float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6 float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7 float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8 float32x4x2_t _out0 = vld2q_f32(outptr0); _out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6 _out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7 vst2q_f32(outptr0, _out0); _out0 = vld2q_f32(outptr0 + 2); _out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8 vst2q_f32(outptr0 + 2, _out0); // out row 1 float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6 float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7 float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8 float32x4x2_t _out1 = vld2q_f32(outptr1); _out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6 _out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7 vst2q_f32(outptr1, _out1); _out1 = vld2q_f32(outptr1 + 2); _out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8 vst2q_f32(outptr1 + 2, _out1); // out row 2 float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6 float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7 float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8 float32x4x2_t _out2 = 
vld2q_f32(outptr2); _out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6 _out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7 vst2q_f32(outptr2, _out2); _out2 = vld2q_f32(outptr2 + 2); _out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8 vst2q_f32(outptr2 + 2, _out2); r0 += 4; outptr0 += 8; outptr1 += 8; outptr2 += 8; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; r0++; outptr0 += 2; outptr1 += 2; outptr2 += 2; } } } } }
gen_input.c
#include <time.h>
#include <stdlib.h>
#include <stdio.h>

#ifdef FP_NUMBER
typedef double FP_NUMBER;
#else
typedef float FP_NUMBER;
#endif

/* Uniform pseudo-random value in [0, 1).
 * NOTE(review): rand() is not required to be thread-safe, so invoking it
 * from the OpenMP loops below races on the hidden RNG state and makes the
 * generated matrices nondeterministic.  Kept as-is for compatibility --
 * consider rand_r() with per-thread seeds if reproducibility matters. */
#define GET_RAND_FP \
  ((FP_NUMBER)rand() / ((FP_NUMBER)(RAND_MAX) + (FP_NUMBER)(1)))

char L_FNAME[32], U_FNAME[32], A_FNAME[32];

/* Generates a random unit-lower-triangular L and an upper-triangular U of
 * size MatrixDim x MatrixDim, computes A = L * U, and writes them to
 * l-<n>.dat, u-<n>.dat and <n>.dat as text.  Returns 0 on success, 1 on
 * any allocation or I/O failure.
 * FIXES vs. the original: the dimension is validated (atoi of junk gives
 * 0), every per-row malloc is checked, and all open files and allocated
 * memory are released on early-exit paths (the original leaked open FILE
 * handles and the L/U/A arrays on failure). */
int main(int argc, char **argv) {
  int i, j, k, MatrixDim;
  FP_NUMBER sum, **L = NULL, **U = NULL, **A = NULL;
  FILE *fl = NULL, *fu = NULL, *fa = NULL;
  int rc = 1; /* pessimistic default; set to 0 only on full success */

  if (argc < 2) {
    printf("./gen_input [Matrix_Dimension_size]\n");
    return 1;
  }

  MatrixDim = atoi(argv[1]);
  if (MatrixDim <= 0) {
    printf("Invalid matrix dimension: %s\n", argv[1]);
    return 1;
  }

  /* calloc so cleanup can safely free() row slots that were never set. */
  L = (FP_NUMBER **)calloc(MatrixDim, sizeof(FP_NUMBER *));
  U = (FP_NUMBER **)calloc(MatrixDim, sizeof(FP_NUMBER *));
  A = (FP_NUMBER **)calloc(MatrixDim, sizeof(FP_NUMBER *));
  if (!L || !U || !A) {
    printf("Can not allocate memory\n");
    goto cleanup;
  }
  for (i = 0; i < MatrixDim; i++) {
    L[i] = (FP_NUMBER *)malloc(sizeof(FP_NUMBER) * MatrixDim);
    U[i] = (FP_NUMBER *)malloc(sizeof(FP_NUMBER) * MatrixDim);
    A[i] = (FP_NUMBER *)malloc(sizeof(FP_NUMBER) * MatrixDim);
    if (!L[i] || !U[i] || !A[i]) {
      printf("Can not allocate memory\n");
      goto cleanup;
    }
  }

  srand(time(NULL));

  snprintf(L_FNAME, sizeof(L_FNAME), "l-%d.dat", MatrixDim);
  fl = fopen(L_FNAME, "wb");
  if (fl == NULL) {
    printf("Cannot open file %s\n", L_FNAME);
    goto cleanup;
  }
  snprintf(U_FNAME, sizeof(U_FNAME), "u-%d.dat", MatrixDim);
  fu = fopen(U_FNAME, "wb");
  if (fu == NULL) {
    printf("Cannot open file %s\n", U_FNAME);
    goto cleanup;
  }
  snprintf(A_FNAME, sizeof(A_FNAME), "%d.dat", MatrixDim);
  fa = fopen(A_FNAME, "wb");
  if (!fa) {
    printf("Cannot open file %s\n", A_FNAME);
    goto cleanup;
  }

  /* L: unit diagonal, random strictly below; U: random on/above the
   * diagonal.  Rows are independent, hence the parallel loop. */
#pragma omp parallel for default(none) private(i, j) shared(L, U, MatrixDim)
  for (i = 0; i < MatrixDim; i++) {
    for (j = 0; j < MatrixDim; j++) {
      if (i == j) {
        L[i][j] = 1.0;
        U[i][j] = GET_RAND_FP;
      } else if (i < j) {
        L[i][j] = 0;
        U[i][j] = GET_RAND_FP;
      } else { /* i > j */
        L[i][j] = GET_RAND_FP;
        U[i][j] = 0;
      }
    }
  }

  /* A = L * U (naive O(n^3) product; output rows are independent). */
#pragma omp parallel for default(none) private(i, j, k, sum) shared(L, U, A, \
                                                                    MatrixDim)
  for (i = 0; i < MatrixDim; i++) {
    for (j = 0; j < MatrixDim; j++) {
      sum = 0;
      for (k = 0; k < MatrixDim; k++)
        sum += L[i][k] * U[k][j];
      A[i][j] = sum;
    }
  }

  /* Write L and U row-by-row; A's file is prefixed with the dimension. */
  for (i = 0; i < MatrixDim; i++) {
    for (j = 0; j < MatrixDim; j++)
      fprintf(fl, "%f ", L[i][j]);
    fprintf(fl, "\n");
  }
  for (i = 0; i < MatrixDim; i++) {
    for (j = 0; j < MatrixDim; j++)
      fprintf(fu, "%f ", U[i][j]);
    fprintf(fu, "\n");
  }
  fprintf(fa, "%d\n", MatrixDim);
  for (i = 0; i < MatrixDim; i++) {
    for (j = 0; j < MatrixDim; j++)
      fprintf(fa, "%f ", A[i][j]);
    fprintf(fa, "\n");
  }

  rc = 0;

cleanup:
  if (fl) fclose(fl);
  if (fu) fclose(fu);
  if (fa) fclose(fa);
  if (L && U && A) {
    /* Rows were only allocated once all three top-level arrays existed;
     * calloc-zeroed slots make free(NULL) a safe no-op. */
    for (i = 0; i < MatrixDim; i++) {
      free(L[i]);
      free(U[i]);
      free(A[i]);
    }
  }
  free(L);
  free(U);
  free(A);
  return rc;
}
firstlastprivate.c
#include <stdio.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif main() { int i, n = 7; int a[n], suma=0; for (i=0; i<n; i++) a[i] = i; #pragma omp parallel for firstprivate(suma) lastprivate(suma) for (i=0; i<n; i++) { suma = suma + a[i]; printf(" thread %d suma a[%d] suma=%d \n", omp_get_thread_num(),i,suma); } printf("\nFuera de la construcción parallel suma=%d\n", suma); }
GB_unaryop__minv_uint64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_uint64_uint32
// op(A') function: GB_tran__minv_uint64_uint32

// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)

// input (A) entry type
#define GB_ATYPE \
    uint32_t

// output (C) entry type
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// the p-th entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint64_uint32
(
    uint64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // one output entry per input entry, split statically across nthreads
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared by textual inclusion; the macros
    // above specialize it for this type/operator combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RandOpt.c
/* kcollins - RandomAccess core_single_cpu kernel from HPCC */
/* with C driver for standalone testing */

/*
 * This code has been contributed by the DARPA HPCS program. Contact
 * David Koester <dkoester@mitre.org> or Bob Lucas <rflucas@isi.edu>
 * if you have questions.
 *
 * GUPS (Giga UPdates per Second) is a measurement that profiles the memory
 * architecture of a system and is a measure of performance similar to MFLOPS.
 * The HPCS HPCchallenge RandomAccess benchmark is intended to exercise the
 * GUPS capability of a system, much like the LINPACK benchmark is intended to
 * exercise the MFLOPS capability of a computer. In each case, we would
 * expect these benchmarks to achieve close to the "peak" capability of the
 * memory system. The extent of the similarities between RandomAccess and
 * LINPACK are limited to both benchmarks attempting to calculate a peak system
 * capability.
 *
 * GUPS is calculated by identifying the number of memory locations that can be
 * randomly updated in one second, divided by 1 billion (1e9). The term "randomly"
 * means that there is little relationship between one address to be updated and
 * the next, except that they occur in the space of one half the total system
 * memory. An update is a read-modify-write operation on a table of 64-bit words.
 * An address is generated, the value at that address read from memory, modified
 * by an integer operation (add, and, or, xor) with a literal value, and that
 * new value is written back to memory.
 *
 * We are interested in knowing the GUPS performance of both entire systems and
 * system subcomponents --- e.g., the GUPS rating of a distributed memory
 * multiprocessor the GUPS rating of an SMP node, and the GUPS rating of a
 * single processor. While there is typically a scaling of FLOPS with processor
 * count, a similar phenomenon may not always occur for GUPS.
 *
 * For additional information on the GUPS metric, the HPCchallenge RandomAccess
 * Benchmark,and the rules to run RandomAccess or modify it to optimize
 * performance -- see http://icl.cs.utk.edu/hpcc/
 *
 */

/*
 * This file contains the computational core of the single cpu version
 * of GUPS. The inner loop should easily be vectorized by compilers
 * with such support.
 *
 * This core is used by both the single_cpu and star_single_cpu tests.
 */

/* Number of updates to table (suggested: 4x number of table entries) */
#include <sys/types.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <omp.h>
#include "hif.h"

#define POLY 0x0000000000000007UL
#define PERIOD 1317624576693539401L
#define NUPDATE (4 * TableSize)
#define NUMT 512

uint64_t HPCC_starts(int64_t);

/* Performs the random read-modify-write (XOR) updates against Table.
 * One random-number stream per (unit, thread) pair is precomputed with
 * HPCC_starts, then each team/thread advances its own LFSR stream.
 * NOTE(review): the `ran` array allocated here is never freed (leak). */
static void RandomAccessUpdate(uint64_t TableSize, uint64_t *Table, int numt) {
  uint64_t i;
  uint64_t *ran;
  // uint64_t ran[NUMT]; /* Current random numbers */
  int j;

  /* Perform updates to main table. The scalar equivalent is:
   *
   * uint64_t ran;
   * ran = 1;
   * for (i=0; i<NUPDATE; i++) {
   * ran = (ran << 1) ^ (((int64_t) ran < 0) ? POLY : 0);
   * table[ran & (TableSize-1)] ^= ran;
   * }
   */
  uint32_t unitCnt = __htc_get_unit_count();
  uint64_t ranSize = unitCnt * numt;
  ran = (uint64_t *)malloc(ranSize*sizeof(uint64_t));
  if (! ran) {
    printf( "Failed to allocate memory for the ran array (%ld).\n", ranSize);
    exit(1);
  }

  /* Seed each stream at its starting point along the global update sequence.
   * NOTE(review): loop index j is int while ranSize is uint64_t -- the
   * comparison promotes j, but very large ranSize would overflow j. */
#pragma omp parallel for
  for (j=0; j<ranSize; j++) {
    ran[j] = HPCC_starts ((NUPDATE/ranSize) * j);
  }
  fprintf(stderr,"ran array has been initialized\n");
  fflush(stderr);

  uint32_t updates_per_unit = NUPDATE/unitCnt;
  /* NOTE(review): updates_per_unit is uint32_t printed with %ld -- format
   * mismatch; PRIu32/PRIu64 from <inttypes.h> would be correct. */
  printf("NUPDATE is %ld updates_per_unit is %ld\n", NUPDATE, updates_per_unit);

  /* One team per hardware unit; each thread pulls its precomputed stream
   * and applies a static,1-scheduled share of that unit's updates. */
#pragma omp target teams num_teams(unitCnt)
  {
    int unit = omp_get_team_num();
    uint64_t *unitran = ran + (unit * numt);
#pragma omp parallel num_threads(numt)
    {
      uint64_t pran = unitran[omp_get_thread_num()];
#pragma omp for schedule(static, 1) nowait
      for (i=0; i< updates_per_unit; i++) {
        /* Advance the 64-bit LFSR and XOR into the addressed table word. */
        pran = (pran << 1) ^ ((int64_t) pran < 0 ? POLY : 0);
        Table[pran & (TableSize-1)] ^= pran;
      }
    }
  }
}

/* Utility routine to start random number generator at Nth step.
 * Computes the LFSR state after n steps in O(log n) by repeated squaring of
 * the 64x64 GF(2) transition matrix (stored column-wise in m2). */
uint64_t HPCC_starts(int64_t n) {
  int i, j;
  uint64_t m2[64];
  uint64_t temp, ran;

  while (n < 0) n += PERIOD;
  while (n > PERIOD) n -= PERIOD;
  if (n == 0) return 0x1;

  /* m2[i] holds the effect of advancing 2^(i+1) steps on each basis bit. */
  temp = 0x1;
  for (i=0; i<64; i++) {
    m2[i] = temp;
    temp = (temp << 1) ^ ((int64_t) temp < 0 ? POLY : 0);
    temp = (temp << 1) ^ ((int64_t) temp < 0 ? POLY : 0);
  }

  /* Find the highest set bit of n. */
  for (i=62; i>=0; i--)
    if ((n >> i) & 1)
      break;

  /* Square-and-multiply over GF(2) down the remaining bits of n. */
  ran = 0x2;
  while (i > 0) {
    temp = 0;
    for (j=0; j<64; j++)
      if ((ran >> j) & 1) temp ^= m2[j];
    ran = temp;
    i -= 1;
    if ((n >> i) & 1)
      ran = (ran << 1) ^ ((int64_t) ran < 0 ? POLY : 0);
  }

  return ran;
}

/*kcollins timers*/
#include <sys/time.h>
#include <sys/resource.h>

/* Wall-clock seconds since the epoch, microsecond resolution. */
double RTSEC() {
  struct timeval tp;
  struct timezone tzp;
  int i;
  i = gettimeofday(&tp,&tzp);
  return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

/* User-mode CPU seconds consumed by this process. */
double CPUSEC() {
  struct rusage ru;
  int i;
  i = getrusage(RUSAGE_SELF,&ru);
  return( (double) ru.ru_utime.tv_sec + (double) ru.ru_utime.tv_usec * 1.e-6 );
}

/* Driver: allocates a 2^power word table (power from argv[1], default 10),
 * mirrors it into coprocessor memory via the pers_* API, times the update
 * kernel, then serially replays the update sequence to verify the result. */
int main (int argc, char **argv) {
  uint64_t i;
  uint64_t temp;
  double cputime; /* CPU time to update table */
  double realtime; /* Real time to update table */
  double GUPs;
  uint64_t *Table;
  uint64_t *cp_Table;
  uint64_t TableSize;

  pers_attach();

  int power = 10;
  if(argc > 1) {
    power = atoi(argv[1]);
  }
  TableSize = 1<<power;

  Table = (uint64_t *)calloc( TableSize, sizeof(uint64_t) );
  if (! Table) {
    printf( "Failed to allocate memory for the update table (%ld).\n", TableSize);
    return 1;
  }
  cp_Table = ((uint64_t *)(pers_cp_malloc(TableSize*sizeof(uint64_t ))));
  if (!cp_Table) {
    printf("Failed to allocate memory for the cp update table (%ld).\n",TableSize);
    return 1;
  }

  /* Print parameters for run */
  printf( "Main table size = %ld words\n", TableSize);
  printf( "Number of updates = %ld\n", NUPDATE);

  /* Initialize main table */
  for (i=0; i<TableSize; i++) Table[i] = i;

  pers_cp_memcpy(cp_Table, Table, TableSize*sizeof(uint64_t ));

  /* Begin timing here */
  cputime = -CPUSEC();
  realtime = -RTSEC();

  // RandomAccessUpdate(TableSize,Table);
  RandomAccessUpdate(TableSize,cp_Table, 256);

  /* End timed section */
  cputime += CPUSEC();
  realtime += RTSEC();

  pers_cp_memcpy(Table, cp_Table, TableSize*sizeof(uint64_t ));

  /* make sure no division by zero */
  GUPs = (realtime > 0.0 ? 1.0 / realtime : -1.0);
  GUPs *= 1e-9*NUPDATE;

  /* Print timing results */
  printf( "CPU time used = %.6f seconds\n", cputime);
  printf( "Real time used = %.6f seconds\n", realtime);
  printf( "%.9f Billion(10^9) Updates per second [GUP/s]\n", GUPs );

  /* Verification of results (in serial or "safe" mode; optional) */
  /* XOR is self-inverse, so replaying the full scalar sequence should
   * restore Table[i] == i everywhere the parallel run was correct. */
  temp = 0x1;
  for (i=0; i<NUPDATE; i++) {
    temp = (temp << 1) ^ (((int64_t) temp < 0) ? POLY : 0);
    Table[temp & (TableSize-1)] ^= temp;
  }

  temp = 0;
  for (i=0; i<TableSize; i++)
    if (Table[i] != i)
      temp++;

  printf( "Found %ld errors in %ld locations (%s).\n",
          temp, TableSize, (temp <= 0.01*TableSize) ? "passed" : "failed");

  free( Table );
  return 0;
}
unset_callback.c
// RUN: %libomp-compile-and-run | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { #pragma omp parallel num_threads(1) { } ompt_set_callback(ompt_callback_parallel_begin, NULL); #pragma omp parallel num_threads(1) { } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_idle' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_parallel_begin: // CHECK: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: // CHECK-NOT: {{^}}[[THREAD_ID]]: ompt_event_parallel_begin: // CHECK: {{^}}[[THREAD_ID]]: ompt_event_parallel_end: return 0; }
viterbi_decode_op.h
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/operators/controlflow/compare_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_functor.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/fluid/operators/gather.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/operators/unique_op.h"
#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#endif

namespace paddle {
namespace operators {

using LoDTensor = framework::LoDTensor;

// Writes, for every position along `axis`, the maximum value to `out` and its
// index to `out_idx`.  The tensor is viewed as (pre, n, post); each of the
// pre*post rows is scanned linearly.
template <typename DeviceContext, typename T, typename IndType>
struct Argmax {
  void operator()(const framework::ExecutionContext& ctx, const Tensor& input,
                  Tensor* out_idx, Tensor* out, int axis) {
    framework::DDim input_dims = input.dims();
    int64_t pre = 1;
    int64_t post = 1;
    int64_t n = input_dims[axis];
    for (int i = 0; i < axis; i++) {
      pre *= input_dims[i];
    }
    for (int i = axis + 1; i < input_dims.size(); i++) {
      post *= input_dims[i];
    }
    int64_t height = pre * post;
    int64_t width = n;
    const T* in_data = input.data<T>();
    IndType* out_idx_data = out_idx->data<IndType>();
    T* out_data = out->data<T>();
// Reduce
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
    for (int64_t i = 0; i < height; ++i) {
      int64_t h = i / post;
      int64_t w = i % post;
      IndType max_idx = -1;
      T max_value = (std::numeric_limits<T>::lowest)();  // for windows compile
      for (int64_t j = 0; j < width; ++j) {
        if (in_data[h * width * post + j * post + w] > max_value) {
          max_value = in_data[h * width * post + j * post + w];
          max_idx = j;
        }
      }
      out_data[i] = max_value;
      out_idx_data[i] = max_idx;
    }
  }
};

// Fills data[i] = i * scale for i in [0, end).
template <typename DeviceContext>
struct ARange {
  void operator()(const DeviceContext& dev_ctx, int64_t* data, int end,
                  int64_t scale) {
    for (int i = 0; i < end; ++i) {
      data[i] = i * scale;
    }
  }
};

// Scalar maximum over all elements of `input`.
template <typename DeviceContext, typename T>
struct GetMaxValue {
  void operator()(const DeviceContext& dev_ctx, const Tensor& input,
                  T* max_value) {
    auto input_ptr = input.data<T>();
    auto num = input.numel();
    *max_value = *std::max_element(input_ptr, input_ptr + num);
  }
};

// Thin wrapper over CPUGather: output[i] = src[index[i]].
template <typename DeviceContext, typename T, typename IndexT = int>
struct Gather {
  void operator()(const DeviceContext& ctx, const Tensor& src,
                  const Tensor& index, Tensor* output) {
    CPUGather<T, IndexT>(ctx, src, index, output);
  }
};

// Element-wise `Functor` over two tensors of identical shape.
template <typename T, typename Functor, typename OutT = T>
void SameDimsBinaryOP(const Tensor& lhs, const Tensor& rhs, Tensor* out) {
  const T* lhs_ptr = lhs.data<T>();
  const T* rhs_ptr = rhs.data<T>();
  OutT* out_ptr = out->data<OutT>();
  Functor functor;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < out->numel(); ++i) {
    out_ptr[i] = functor(lhs_ptr[i], rhs_ptr[i]);
  }
}

// Element-wise comparison of two int64 tensors, producing a mask of type T.
template <typename DeviceContext,
          template <typename InT, typename OutT> typename CompareFunctor,
          typename T>
struct GetMask {
  void operator()(const framework::ExecutionContext& ctx, const Tensor& lhs,
                  const Tensor& rhs, Tensor* mask) {
    SameDimsBinaryOP<int64_t, CompareFunctor<int64_t, T>, T>(lhs, rhs, mask);
  }
};

// Maps a flat output index to the (possibly broadcast) flat offsets into the
// two inputs.  This primary template is the per-thread-safe variant used when
// iterations are parallel; each call is independent of the previous one.
template <bool is_multi_threads>
struct GetInputIndex {
  void operator()(const std::vector<int>& lhs_dims,
                  const std::vector<int>& rhs_dims,
                  const std::vector<int>& output_dims,
                  const std::vector<int>& lhs_strides,
                  const std::vector<int>& rhs_strides,
                  const std::vector<int>& output_strides, int output_idx,
                  int* index_array, int* lhs_idx, int* rhs_idx) {
    int out_dims_size = output_strides.size();
    for (int j = 0; j < out_dims_size; ++j) {
      int curr_idx = output_idx / output_strides[j];
      output_idx %= output_strides[j];
      // Dimensions of size 1 broadcast: they contribute no offset.
      *lhs_idx += (lhs_dims[j] > 1) ? curr_idx * lhs_strides[j] : 0;
      *rhs_idx += (rhs_dims[j] > 1) ? curr_idx * rhs_strides[j] : 0;
    }
  }
};

// Single-threaded variant: carries the multi-index in `index_array` across
// consecutive calls, so calls must be made in flat-index order.
template <>
struct GetInputIndex<false> {
  void operator()(const std::vector<int>& lhs_dims,
                  const std::vector<int>& rhs_dims,
                  const std::vector<int>& output_dims,
                  const std::vector<int>& lhs_strides,
                  const std::vector<int>& rhs_strides,
                  const std::vector<int>& output_strides, int output_idx,
                  int* index_array, int* lhs_idx, int* rhs_idx) {
    int out_dims_size = output_strides.size();
    *lhs_idx =
        pten::GetElementwiseIndex(lhs_dims.data(), out_dims_size, index_array);
    *rhs_idx =
        pten::GetElementwiseIndex(rhs_dims.data(), out_dims_size, index_array);
    pten::UpdateElementwiseIndexArray(output_dims.data(), out_dims_size,
                                      index_array);
  }
};

// Element-wise `Functor` with numpy-style broadcasting of lhs/rhs to the
// output shape.  `is_multi_threads` selects the GetInputIndex strategy.
template <typename T, typename Functor, bool is_multi_threads = false>
void SimpleBroadcastBinaryOP(const Tensor& lhs, const Tensor& rhs,
                             Tensor* out) {
  const T* lhs_ptr = lhs.data<T>();
  const T* rhs_ptr = rhs.data<T>();
  T* out_ptr = out->data<T>();
  int out_size = static_cast<int>(out->dims().size());
  std::vector<int> out_dims(out_size);
  std::vector<int> lhs_dims(out_size);
  std::vector<int> rhs_dims(out_size);
  std::copy(lhs.dims().Get(), lhs.dims().Get() + out_size, lhs_dims.data());
  std::copy(rhs.dims().Get(), rhs.dims().Get() + out_size, rhs_dims.data());
  std::copy(out->dims().Get(), out->dims().Get() + out_size, out_dims.data());
  std::vector<int> output_strides(out_size, 1);
  std::vector<int> lhs_strides(out_size, 1);
  std::vector<int> rhs_strides(out_size, 1);
  std::vector<int> index_array(out_size, 0);
  // calculate strides
  for (int i = out_size - 2; i >= 0; --i) {
    output_strides[i] = output_strides[i + 1] * out_dims[i + 1];
    lhs_strides[i] = lhs_strides[i + 1] * lhs_dims[i + 1];
    rhs_strides[i] = rhs_strides[i + 1] * rhs_dims[i + 1];
  }
  Functor functor;
  GetInputIndex<is_multi_threads> get_input_index;
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < out->numel(); ++i) {
    int lhs_idx = 0;
    int rhs_idx = 0;
    get_input_index(lhs_dims, rhs_dims, out_dims, lhs_strides, rhs_strides,
                    output_strides, i, index_array.data(), &lhs_idx, &rhs_idx);
    out_ptr[i] = functor(lhs_ptr[lhs_idx], rhs_ptr[rhs_idx]);
  }
}

// Dispatches between the same-shape fast path and the broadcast path, and
// (with MKLML) between the threaded and sequential broadcast variants.
template <typename DeviceContext, template <typename T> typename BinaryFunctor,
          typename T>
struct BinaryOperation {
  void operator()(const DeviceContext& dev_ctx, const Tensor& lhs,
                  const Tensor& rhs, Tensor* output) {
    if (lhs.dims() == rhs.dims()) {
      SameDimsBinaryOP<T, BinaryFunctor<T>>(lhs, rhs, output);
    } else {
      bool is_multi_threads = false;
#ifdef PADDLE_WITH_MKLML
      if (omp_get_max_threads() > 1) {
        is_multi_threads = true;
      }
#endif
      if (is_multi_threads) {
        SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, true>(lhs, rhs, output);
      } else {
        SimpleBroadcastBinaryOP<T, BinaryFunctor<T>, false>(lhs, rhs, output);
      }
    }
  }
};

// Hands out consecutive sub-tensors sliced from one pre-allocated 1-D
// buffer, so the kernel below avoids per-temporary allocations.
class TensorBuffer {
 public:
  explicit TensorBuffer(const LoDTensor& in) : buffer_(in), offset_(0) {
    buffer_.Resize({buffer_.numel()});
  }
  Tensor GetBufferBlock(std::initializer_list<int64_t> shape) {
    int64_t size = std::accumulate(shape.begin(), shape.end(), 1,
                                   std::multiplies<int64_t>());
    Tensor block = buffer_.Slice(offset_, offset_ + size);
    offset_ += size;
    block.Resize(shape);
    return block;
  }

 private:
  LoDTensor buffer_;  // need to resize 1-D Tensor
  int offset_;
};

// CPU Viterbi decoding for a linear-chain CRF: forward pass accumulates the
// best-score table (alpha) and per-step argmax backpointers (historys), then
// the backward pass follows the backpointers to emit the best tag Path and
// its Scores.  Input is (batch, seq_len, n_labels); Length masks padding.
template <typename DeviceContext, typename T>
class ViterbiDecodeKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    bool include_bos_eos_tag = ctx.Attr<bool>("include_bos_eos_tag");
    auto& dev_ctx = ctx.template device_context<DeviceContext>();
    auto curr_place = ctx.GetPlace();
    auto* input = ctx.Input<Tensor>("Input");
    auto batch_size = static_cast<int>(input->dims()[0]);
    auto seq_len = static_cast<int>(input->dims()[1]);
    auto n_labels = static_cast<int>(input->dims()[2]);
    pten::funcs::SetConstant<DeviceContext, T> float_functor;
    pten::funcs::SetConstant<DeviceContext, int64_t> int_functor;
    std::vector<Tensor> historys;
    // We create tensor buffer in order to avoid allocating memory frequently
    // 10 means allocate 10*batch_size bytes memory, such as int_mask, zero...
    int buffer_size = batch_size * (n_labels + 1) * seq_len + 10 * batch_size;
    LoDTensor int_buffer;
    int_buffer.Resize(framework::make_ddim({buffer_size}));
    int_buffer.mutable_data<int64_t>(ctx.GetPlace());
    TensorBuffer int_tensor_buffer(int_buffer);
    // create float tensor buffer
    // 10 means allocate 10*batch_size*n_labels bytes, such as alpha, alpha_max
    buffer_size = batch_size * (seq_len + 10) * n_labels +
                  (batch_size + 2) * n_labels * n_labels;
    LoDTensor float_buffer;
    float_buffer.Resize(framework::make_ddim({buffer_size}));
    float_buffer.mutable_data<T>(ctx.GetPlace());
    TensorBuffer float_tensor_buffer(float_buffer);
    auto* length = ctx.Input<Tensor>("Length");
    Tensor left_length = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    framework::TensorCopy(*length, curr_place, dev_ctx, &left_length);
    int64_t max_seq_len = 0;
    GetMaxValue<DeviceContext, int64_t> get_max_value;
    get_max_value(dev_ctx, left_length, &max_seq_len);
    auto* scores = ctx.Output<Tensor>("Scores");
    scores->mutable_data<T>(curr_place);
    auto* path = ctx.Output<Tensor>("Path");
    path->Resize({batch_size, max_seq_len});
    path->mutable_data<int64_t>(curr_place);
    Tensor tpath = int_tensor_buffer.GetBufferBlock({max_seq_len, batch_size});
    auto batch_path = Unbind(tpath);
    for (auto it = batch_path.begin(); it != batch_path.end(); ++it) {
      it->Resize({batch_size});
    }
    // create and init required tensor
    // input is transposed to (seq_len, batch, n_labels) so each time step is
    // a contiguous slice
    Tensor input_exp =
        float_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    TransCompute<DeviceContext, T>(3, dev_ctx, *input, &input_exp, {1, 0, 2});
    auto* transition = ctx.Input<Tensor>("Transition");
    Tensor trans_exp = float_tensor_buffer.GetBufferBlock({n_labels, n_labels});
    framework::TensorCopy(*transition, curr_place, dev_ctx, &trans_exp);
    trans_exp.Resize({1, n_labels, n_labels});
    Tensor alpha = float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    Tensor zero = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &zero, 0);
    Tensor one = int_tensor_buffer.GetBufferBlock({batch_size, 1});
    int_functor(dev_ctx, &one, 1);
    Tensor float_one = float_tensor_buffer.GetBufferBlock({batch_size, 1});
    float_functor(dev_ctx, &float_one, static_cast<T>(1.0));
    Tensor alpha_trn_sum =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels, n_labels});
    Tensor alpha_max =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    Tensor alpha_argmax =
        int_tensor_buffer.GetBufferBlock({seq_len, batch_size, n_labels});
    auto alpha_argmax_unbind = Unbind(alpha_argmax);
    Tensor alpha_nxt =
        float_tensor_buffer.GetBufferBlock({batch_size, n_labels});
    Tensor int_mask = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor zero_len_mask = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor float_mask = float_tensor_buffer.GetBufferBlock({batch_size, 1});
    Tensor stop_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    Tensor start_trans = float_tensor_buffer.GetBufferBlock({1, 1, n_labels});
    Tensor rest_trans =
        float_tensor_buffer.GetBufferBlock({1, n_labels - 2, n_labels});
    Tensor last_ids = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor last_ids_tmp = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor batch_offset = int_tensor_buffer.GetBufferBlock({batch_size});
    Tensor gather_idx = int_tensor_buffer.GetBufferBlock({batch_size});
    // split the transition matrix into rest / stop / start rows
    std::vector<const Tensor*> shape{&rest_trans, &stop_trans, &start_trans};
    std::vector<Tensor*> outputs{&rest_trans, &stop_trans, &start_trans};
    math::SplitFunctor<DeviceContext, T> split_functor;
    split_functor(dev_ctx, trans_exp, shape, 1, &outputs);
    stop_trans.Resize({1, n_labels});
    start_trans.Resize({1, n_labels});
    auto logit0 = input_exp.Slice(0, 1);
    logit0.Resize({batch_size, n_labels});
    BinaryOperation<DeviceContext, AddFunctor, T> AddFloat;
    BinaryOperation<DeviceContext, AddFunctor, int64_t> AddInt;
    BinaryOperation<DeviceContext, MulFunctor, T> MulFloat;
    BinaryOperation<DeviceContext, MulFunctor, int64_t> MulInt;
    BinaryOperation<DeviceContext, SubFunctor, T> SubFloat;
    BinaryOperation<DeviceContext, SubFunctor, int64_t> SubInt;
    // initialize alpha from the first time step (optionally adding the
    // start/stop transition rows)
    if (include_bos_eos_tag) {
      AddFloat(dev_ctx, logit0, start_trans, &alpha);
      GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                &float_mask);
      MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
    } else {
      alpha = logit0;
    }
    SubInt(dev_ctx, left_length, one, &left_length);
    Argmax<DeviceContext, T, int64_t> argmax;
    // forward pass: one step per time position, recording backpointers
    for (int64_t i = 1; i < max_seq_len; ++i) {
      Tensor logit = input_exp.Slice(i, i + 1);
      logit.Resize({batch_size, n_labels});
      Tensor& alpha_exp = alpha.Resize({batch_size, n_labels, 1});
      AddFloat(dev_ctx, alpha_exp, trans_exp, &alpha_trn_sum);
      auto alpha_argmax_temp = alpha_argmax_unbind[i - 1];
      alpha_argmax_temp.Resize({batch_size, n_labels});
      argmax(ctx, alpha_trn_sum, &alpha_argmax_temp, &alpha_max, 1);
      historys.emplace_back(alpha_argmax_temp);
      AddFloat(dev_ctx, alpha_max, logit, &alpha_nxt);
      alpha.Resize({batch_size, n_labels});
      // mask = paddle.cast((left_length > 0), dtype='float32')
      // alpha = mask * alpha_nxt + (1 - mask) * alpha
      GetMask<DeviceContext, GreaterThanFunctor, T>()(ctx, left_length, zero,
                                                      &float_mask);
      // alpha_nxt = mask * alpha_nxt
      MulFloat(dev_ctx, alpha_nxt, float_mask, &alpha_nxt);
      // inv_mask = 1 - mask
      SubFloat(dev_ctx, float_one, float_mask, &float_mask);
      // alpha = (1 - mask) * alpha
      MulFloat(dev_ctx, alpha, float_mask, &alpha);
      // alpha += alpha_nxt
      AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      if (include_bos_eos_tag) {
        GetMask<DeviceContext, EqualFunctor, T>()(ctx, left_length, one,
                                                  &float_mask);
        // alpha += mask * trans_exp[:, self.stop_idx]
        MulFloat(dev_ctx, stop_trans, float_mask, &alpha_nxt);
        AddFloat(dev_ctx, alpha, alpha_nxt, &alpha);
      }
      SubInt(dev_ctx, left_length, one, &left_length);
    }
    argmax(ctx, alpha, &last_ids, scores, 1);
    left_length.Resize({batch_size});
    GetMask<DeviceContext, GreaterEqualFunctor, int64_t>()(ctx, left_length,
                                                           zero, &int_mask);
    // last_ids_update = last_ids * tag_mask
    int last_ids_index = 1;
    int actual_len = (std::min)(seq_len, static_cast<int>(max_seq_len));
    MulInt(dev_ctx, last_ids, int_mask,
           &batch_path[actual_len - last_ids_index]);
    // The algorithm below can refer to
    // https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/layers/crf.py#L438
    ARange<DeviceContext> arange;
    arange(dev_ctx, batch_offset.data<int64_t>(), batch_size, n_labels);
    Gather<DeviceContext, int64_t, int64_t> gather;
    // backward pass: walk the backpointer history from the last step to the
    // first, masking out positions beyond each sequence's real length
    for (auto hist = historys.rbegin(); hist != historys.rend(); ++hist) {
      ++last_ids_index;
      AddInt(dev_ctx, left_length, one, &left_length);
      AddInt(dev_ctx, batch_offset, last_ids, &gather_idx);
      Tensor& last_ids_update = batch_path[actual_len - last_ids_index];
      hist->Resize({batch_size * n_labels});
      gather(dev_ctx, *hist, gather_idx, &last_ids_update);
      GetMask<DeviceContext, GreaterThanFunctor, int64_t>()(ctx, left_length,
                                                            zero, &int_mask);
      MulInt(dev_ctx, last_ids_update, int_mask, &last_ids_update);
      GetMask<DeviceContext, EqualFunctor, int64_t>()(ctx, left_length, zero,
                                                      &zero_len_mask);
      MulInt(dev_ctx, last_ids, zero_len_mask, &last_ids_tmp);
      SubInt(dev_ctx, one, zero_len_mask, &zero_len_mask);
      MulInt(dev_ctx, last_ids_update, zero_len_mask, &last_ids_update);
      AddInt(dev_ctx, last_ids_update, last_ids_tmp, &last_ids_update);
      GetMask<DeviceContext, LessThanFunctor, int64_t>()(ctx, left_length, zero,
                                                         &int_mask);
      MulInt(dev_ctx, last_ids, int_mask, &last_ids);
      AddInt(dev_ctx, last_ids_update, last_ids, &last_ids);
    }
    TransCompute<DeviceContext, int64_t>(2, dev_ctx, tpath, path, {1, 0});
  }
};

}  // namespace operators
}  // namespace paddle
ten_tusscher_2004_epi_S2_16.c
//Original Ten Tusscher
// ten Tusscher et al. 2004 human ventricular cell model (epicardial variant),
// serial CPU implementation with one hard-coded fitted parameter set ("S2_16").
// NOTE(review): `real`, NEQ, INITIAL_V and the GET_/SET_/SOLVE_ entry-point
// macros come from the model header — presumably `real` is float or double;
// confirm in ten_tusscher_2004_epi_S2_16.h.
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S2_16.h"

// Fill in static model metadata (resting potential, ODE system size) on the
// caller-supplied cell_model struct; the two flags select which fields are set.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;

}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialize one cell's state vector sv[0..NEQ-1] with precomputed
// steady-state values (the model's default initial conditions are kept
// below, commented out, for reference).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */

    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.5670791605140,0.00129039457886424,0.779682705615352,0.779565803667030,0.000174643435814754,0.485077974578703,0.00294054250300053,0.999998347486609,1.93385669757000e-08,1.89136232203263e-05,0.999776056260112,1.00672396485439,0.999986782308567,5.38161476542289e-05,0.868490311201604,8.63756409664858,140.750513717046};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance every requested cell by num_steps explicit time steps of size dt.
// cells_to_solve (optional) maps work item i to its state-vector id; when
// NULL the mapping is the identity.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    // One cell per iteration; sv_id must be thread-private (i is the loop
    // variable and is private by default).
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// Perform one time step for a single cell: snapshot the state, evaluate the
// model, and write the result back.
// NOTE(review): RHS_cpu returns the already-updated next state in rDY
// (gates via closed-form exponential update, voltage via explicit Euler),
// so `sv[i] = rDY[i]` IS the integration step, not a derivative assignment.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluate one step of the ten Tusscher 2004 model.
//   sv           - current state (17 entries, see index comments below)
//   rDY_         - OUT: next state after one step of size dt
//   stim_current - externally applied stimulus current
//   dt           - time step
// Membrane currents are computed from the current state; concentrations
// (Cai, CaSR, Nai, Ki) are updated in place via forward Euler plus the
// analytic buffering solution; gating variables use the exact exponential
// (steady-state/time-constant) update.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane potential V
    real sm   = sv[1];    // INa activation
    real sh   = sv[2];    // INa fast inactivation
    real sj   = sv[3];    // INa slow inactivation
    real sxr1 = sv[4];    // IKr activation
    real sxr2 = sv[5];    // IKr inactivation
    real sxs  = sv[6];    // IKs activation
    real ss   = sv[7];    // Ito inactivation
    real sr   = sv[8];    // Ito activation
    real sd   = sv[9];    // ICaL activation
    real sf   = sv[10];   // ICaL voltage inactivation
    real sfca = sv[11];   // ICaL Ca-dependent inactivation
    real sg   = sv[12];   // CICR gate
    real Cai  = sv[13];   // cytosolic [Ca2+]
    real CaSR = sv[14];   // SR [Ca2+]
    real Nai  = sv[15];   // intracellular [Na+]
    real Ki   = sv[16];   // intracellular [K+]

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    // real Gto=0.073;
    //#endif
    //#ifdef MCELL
    // real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductance/rate set "S2_16" — overrides the defaults above.
    real parameters []={13.5330035924564,0.000240499014921443,0.000154239850149734,0.000711877915989393,0.260543322341942,0.172165226428586,0.131839193192969,3.52745526458537,0.0174254047940632,3.10483520471553,1091.13000918787,0.000588533479543541,0.232717323643346,0.0169078674285819,0.00512908121424897,3.80799254675874e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    // Precomputed exponentials for the FCa and G gate updates below.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst/reversal potentials and rectification factors.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium: analytic solution of the calsequestrin buffering quadratic.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium: same quadratic with the cytosolic buffer.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // Sodium and potassium: forward Euler.
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
// Transient-outward gate parameters differ per cell type (EPI/ENDO/MCELL).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Closed-form exponential update: y(t+dt) = y_inf - (y_inf - y) * exp(-dt/tau).
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // FCa and G may only relax toward steady state while depolarized
    // (> -37 mV they are not allowed to increase).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}
mscash1_fmt_plug.c
/* MSCASH patch for john (performance improvement)
 *
 * Modified for utf-8 support by magnum in 2011, same terms as below
 *
 * Written by Alain Espinosa <alainesp at gmail.com> in 2007. No copyright
 * is claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the
 * public domain is deemed null and void, then the software is
 * Copyright (c) 2007 Alain Espinosa and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 *
 * (This is a heavily cut-down "BSD license".)
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_mscash;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mscash);
#else

#include <string.h>
#include "arch.h"
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "unicode.h"
#include "options.h"
#include "loader.h"
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "mscash"
#define FORMAT_NAME             "MS Cache Hash (DCC)"
#define ALGORITHM_NAME          "MD4 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        27
#define MAX_CIPHERTEXT_LENGTH   (2 + 19*3 + 1 + 32) // x3 because salt may be UTF-8 in input

/* Note: some tests will be replaced in init() if running UTF-8 */
static struct fmt_tests tests[] = {
    {"176a4c2bd45ac73687676c2f09045353", "", {"root"} }, // nullstring password
    {"M$test2#ab60bdb4493822b175486810ac2abe63", "test2" },
    {"M$test1#64cd29e36a8431a2b111378564a10631", "test1" },
    {"M$test1#64cd29e36a8431a2b111378564a10631", "test1" },
    {"M$test1#64cd29e36a8431a2b111378564a10631", "test1" },
    {"M$test3#14dd041848e12fc48c0aa7a416a4a00c", "test3" },
    {"M$test4#b945d24866af4b01a6d89b9d932a153c", "test4" },
    {"64cd29e36a8431a2b111378564a10631", "test1", {"TEST1"} }, // salt is lowercased before hashing
    {"290efa10307e36a79b3eebf2a6b29455", "okolada", {"nineteen_characters"} }, // max salt length
    {"ab60bdb4493822b175486810ac2abe63", "test2", {"test2"} },
    {"b945d24866af4b01a6d89b9d932a153c", "test4", {"test4"} },
    {NULL}
};

#define BINARY_SIZE             16
#define BINARY_ALIGN            4
#define SALT_SIZE               (11*4)
#define SALT_ALIGN              4

#define OK_NUM_KEYS             64
#define BEST_NUM_KEYS           512
#ifdef _OPENMP
#define MS_NUM_KEYS             (OK_NUM_KEYS * 96)
#else
#define MS_NUM_KEYS             BEST_NUM_KEYS
#endif
#define MIN_KEYS_PER_CRYPT      OK_NUM_KEYS
#define MAX_KEYS_PER_CRYPT      MS_NUM_KEYS

// Working buffers, sized in init() from max_keys_per_crypt:
//   ms_buffer1x - 16 MD4 words of UTF-16LE key material per candidate
//   crypt_out   - full MD4 of each key (4 words per candidate)
//   last        - first-round state of the second (salted) MD4
//   output1x    - (partially) finished salted MD4 per candidate
//   last_i      - previous key length per slot, for buffer cleaning
static unsigned int *ms_buffer1x;
static unsigned int *output1x;
static unsigned int *crypt_out;
static unsigned int *last;
static unsigned int *last_i;
static unsigned int *salt_buffer;     // current salt, set by set_salt()
static unsigned int new_key;          // flag: key buffers changed since last nt_hash()

//Init values
#define INIT_A 0x67452301
#define INIT_B 0xefcdab89
#define INIT_C 0x98badcfe
#define INIT_D 0x10325476

#define SQRT_2 0x5a827999
#define SQRT_3 0x6ed9eba1

static void set_key_utf8(char *_key, int index);
static void set_key_encoding(char *_key, int index);
static void * get_salt_utf8(char *_ciphertext);
static void * get_salt_encoding(char *_ciphertext);
struct fmt_main fmt_mscash;

#if !ARCH_LITTLE_ENDIAN
// Byte-swap `count` 32-bit words in place (big-endian hosts only).
static inline void swap(unsigned int *x, int count)
{
    while (count--) {
        *x = JOHNSWAP(*x);
        x++;
    }
}
#endif

// Allocate per-candidate buffers and pick key/salt handlers for the
// configured target encoding (UTF-8 / legacy codepage / raw ISO-8859-1).
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int n = omp_get_max_threads(), nmin, nmax;

    if (n < 1)
        n = 1;
    // Round min/max keys per crypt to multiples that divide evenly
    // across the available threads.
    nmin = OK_NUM_KEYS - (OK_NUM_KEYS % n);
    if (nmin < n)
        nmin = n;
    fmt_mscash.params.min_keys_per_crypt = nmin;
    nmax = n * BEST_NUM_KEYS;
    if (nmax > MS_NUM_KEYS)
        nmax = MS_NUM_KEYS;
    fmt_mscash.params.max_keys_per_crypt = nmax;
#endif

    ms_buffer1x = mem_calloc_tiny(sizeof(ms_buffer1x[0]) * 16*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    output1x    = mem_calloc_tiny(sizeof(output1x[0])    *  4*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    crypt_out   = mem_calloc_tiny(sizeof(crypt_out[0])   *  4*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    last        = mem_calloc_tiny(sizeof(last[0])        *  4*fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    last_i      = mem_calloc_tiny(sizeof(last_i[0])      *    fmt_mscash.params.max_keys_per_crypt, MEM_ALIGN_WORD);
    new_key=1;

    if (pers_opts.target_enc == UTF_8) {
        // Swap in UTF-8-aware key/salt conversion and matching self-tests.
        fmt_mscash.methods.set_key = set_key_utf8;
        fmt_mscash.methods.salt = get_salt_utf8;
        fmt_mscash.params.plaintext_length = (PLAINTEXT_LENGTH * 3);
        tests[1].ciphertext = "M$\xC3\xBC#48f84e6f73d6d5305f6558a33fa2c9bb";
        tests[1].plaintext = "\xC3\xBC"; // German u-umlaut in UTF-8
        tests[2].ciphertext = "M$user#9121790702dda0fa5d353014c334c2ce";
        tests[2].plaintext = "\xe2\x82\xac\xe2\x82\xac"; // 2 x Euro signs
    }
    else if (pers_opts.target_enc == ASCII || pers_opts.target_enc == ISO_8859_1) {
        tests[1].ciphertext = "M$\xFC#48f84e6f73d6d5305f6558a33fa2c9bb";
        tests[1].plaintext = "\xFC"; // German u-umlaut in UTF-8
        tests[2].ciphertext = "M$\xFC\xFC#593246a8335cf0261799bda2a2a9c623";
        tests[2].plaintext = "\xFC\xFC"; // 2 x Euro signs
    }
    else {
        // Some other single-byte codepage: convert through enc_to_utf16().
        fmt_mscash.methods.set_key = set_key_encoding;
        fmt_mscash.methods.salt = get_salt_encoding;
    }
}

// Canonicalize a ciphertext: copy and lowercase everything after "M$"
// (both the salt/username and the hex hash), encoding-aware.
static char * ms_split(char *ciphertext, int index, struct fmt_main *self)
{
    static char out[MAX_CIPHERTEXT_LENGTH + 1];
    int i;

    for(i = 0; i < MAX_CIPHERTEXT_LENGTH && ciphertext[i]; i++)
        out[i]=ciphertext[i];
    out[i]=0;

    // lowercase salt as well as hash, encoding-aware
    enc_strlwr(&out[2]);

    return out;
}

// Validate "M$<salt>#<32 hex digits>" syntax and the <= 19 UTF-16
// character salt-length limit.
static int valid(char *ciphertext, struct fmt_main *self)
{
    unsigned int i;
    unsigned int l;
    char insalt[3*19+1];
    UTF16 realsalt[21];
    int saltlen;

    if (strncmp(ciphertext, "M$", 2))
        return 0;

    l = strlen(ciphertext);
    if (l <= 32 || l > MAX_CIPHERTEXT_LENGTH)
        return 0;

    l -= 32;
    if(ciphertext[l-1]!='#')
        return 0;

    for (i = l; i < l + 32; i++)
        if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7F)
            return 0;

    // This is tricky: Max supported salt length is 19 characters of Unicode
    saltlen = enc_to_utf16(realsalt, 20, (UTF8*)strnzcpy(insalt, &ciphertext[2], l - 2), l - 3);
    if (saltlen < 0 || saltlen > 19) {
        static int warned = 0;

        if (!ldr_in_pot)
        if (!warned++)
            fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);

        return 0;
    }
    return 1;
}

// Build "M$<user>#<hash>" from a bare 32-hex-digit hash plus the login
// field, falling back to the raw field if it does not validate.
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
    char *cp;
    int i;

    if (!strncmp(split_fields[1], "M$", 2) || !split_fields[0])
        return split_fields[1];
    // NOTE(review): redundant — !split_fields[0] is already handled above.
    if (!split_fields[0])
        return split_fields[1];

    // ONLY check, if this string split_fields[1], is ONLY a 32 byte hex string.
    for (i = 0; i < 32; i++)
        if (atoi16[ARCH_INDEX(split_fields[1][i])] == 0x7F)
            return split_fields[1];

    cp = mem_alloc(strlen(split_fields[0]) + strlen(split_fields[1]) + 4);
    sprintf (cp, "M$%s#%s", split_fields[0], split_fields[1]);
    if (valid(cp, self))
    {
        char *cipher = str_alloc_copy(cp);
        MEM_FREE(cp);
        return cipher;
    }
    MEM_FREE(cp);
    return split_fields[1];
}

static void set_salt(void *salt)
{
    salt_buffer=salt;
}

// Pack the salt (the lowercased username) as UTF-16LE MD4 message words.
// Assumes the salt bytes are already valid UTF-16 code units when widened
// byte-by-byte (ISO-8859-1 path).
static void *get_salt(char *_ciphertext)
{
    unsigned char *ciphertext = (unsigned char *)_ciphertext;
    // length=11 for save memory
    // position 10 = length
    // 0-9 = 1-19 Unicode characters + EOS marker (0x80)
    static unsigned int *out=0;
    unsigned int md4_size;

    if (!out) out = mem_alloc_tiny(11*sizeof(unsigned int), MEM_ALIGN_WORD);
    memset(out,0,11*sizeof(unsigned int));

    ciphertext+=2;

    // Two UTF-16 chars per 32-bit word; 0x80 is the MD4 padding byte.
    for(md4_size = 0 ;; md4_size++)
        if(md4_size < 19 && ciphertext[md4_size]!='#')
        {
            md4_size++;

            out[md4_size>>1] = ciphertext[md4_size-1] | ((ciphertext[md4_size]!='#') ? (ciphertext[md4_size]<<16) : 0x800000);

            if(ciphertext[md4_size]=='#')
                break;
        }
        else
        {
            out[md4_size>>1] = 0x80;
            break;
        }

    // Stored MD4 length field: (8 UTF-16 chars of NT hash + salt chars) in bits/8? 
    // Actually (8 + md4_size) chars * 16 bits => << 4. TODO confirm against formats.h users.
    out[10] = (8 + md4_size) << 4;

//    dump_stuff(out, 44);

    return out;
}

// Same as get_salt() but converts the salt from the configured legacy
// codepage to UTF-16 first.
static void *get_salt_encoding(char *_ciphertext) {
    unsigned char *ciphertext = (unsigned char *)_ciphertext;
    unsigned char input[19*3+1];
    int utf16len, md4_size;
    static UTF16 *out=0;

    if (!out) out = mem_alloc_tiny(22*sizeof(UTF16), MEM_ALIGN_WORD);
    memset(out, 0, 22*sizeof(UTF16));

    ciphertext += 2;

    for (md4_size=0;md4_size<sizeof(input)-1;md4_size++) {
        if (ciphertext[md4_size] == '#')
            break;
        input[md4_size] = ciphertext[md4_size];
    }
    input[md4_size] = 0;

    utf16len = enc_to_utf16(out, 19, input, md4_size);
    if (utf16len < 0)
        utf16len = strlen16(out);
#if ARCH_LITTLE_ENDIAN
    out[utf16len] = 0x80;
#else
    out[utf16len] = 0x8000;
    swap((unsigned int*)out, (md4_size>>1)+1);
#endif

    ((unsigned int*)out)[10] = (8 + utf16len) << 4;

//    dump_stuff(out, 44);

    return out;
}

// UTF-8 variant: decode the salt to UTF-16 before packing into MD4 words.
static void * get_salt_utf8(char *_ciphertext)
{
    unsigned char *ciphertext = (unsigned char *)_ciphertext;
    unsigned int md4_size;
    UTF16 ciphertext_utf16[21];
    int len;
    static ARCH_WORD_32 *out=0;

    if (!out) out = mem_alloc_tiny(11*sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
    memset(out, 0, 11*sizeof(ARCH_WORD_32));

    ciphertext+=2;
    len = ((unsigned char*)strchr((char*)ciphertext, '#')) - ciphertext;
    utf8_to_utf16(ciphertext_utf16, 20, ciphertext, len+1);

    for(md4_size = 0 ;; md4_size++) {
#if !ARCH_LITTLE_ENDIAN
        ciphertext_utf16[md4_size] = (ciphertext_utf16[md4_size]>>8)|(ciphertext_utf16[md4_size]<<8);
#endif
        if(md4_size < 19 && ciphertext_utf16[md4_size]!=(UTF16)'#') {
            md4_size++;
#if !ARCH_LITTLE_ENDIAN
            ciphertext_utf16[md4_size] = (ciphertext_utf16[md4_size]>>8)|(ciphertext_utf16[md4_size]<<8);
#endif
            out[md4_size>>1] = ciphertext_utf16[md4_size-1] | ((ciphertext_utf16[md4_size]!=(UTF16)'#') ? (ciphertext_utf16[md4_size]<<16) : 0x800000);

            if(ciphertext_utf16[md4_size]==(UTF16)'#')
                break;
        }
        else {
            out[md4_size>>1] = 0x80;
            break;
        }
    }

    out[10] = (8 + md4_size) << 4;

    return out;
}

// Decode the 32 hex digits into 4 little-endian words, then undo the
// final MD4 additions and the last few round-3 steps so cmp_* can compare
// against the partially-computed output1x values.
static void *get_binary(char *ciphertext)
{
    static unsigned int out[BINARY_SIZE/sizeof(unsigned int)];
    unsigned int i=0;
    unsigned int temp;
    unsigned int *salt=fmt_mscash.methods.salt(ciphertext);

    for(;ciphertext[0]!='#';ciphertext++);

    ciphertext++;

    for(; i<4 ;i++)
    {
        temp  = (atoi16[ARCH_INDEX(ciphertext[i*8+0])])<<4;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+1])]);

        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+2])])<<12;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+3])])<<8;

        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+4])])<<20;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+5])])<<16;

        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+6])])<<28;
        temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+7])])<<24;

        out[i]=temp;
    }

    out[0] -= INIT_A;
    out[1] -= INIT_B;
    out[2] -= INIT_C;
    out[3] -= INIT_D;

    // Reversed    b += (c ^ d ^ a) + salt_buffer[11] + SQRT_3; b = (b << 15) | (b >> 17);
    out[1]  = (out[1] >> 15) | (out[1] << 17);
    out[1] -= SQRT_3 + (out[2] ^ out[3] ^ out[0]);

    // Reversed    c += (d ^ a ^ b) + salt_buffer[3] + SQRT_3; c = (c << 11) | (c >> 21);
    out[2]  = (out[2] << 21) | (out[2] >> 11);
    out[2] -= SQRT_3 + (out[3] ^ out[0] ^ out[1]) + salt[3];

    // Reversed    d += (a ^ b ^ c) + salt_buffer[7] + SQRT_3; d = (d << 9 ) | (d >> 23);
    out[3]  = (out[3] << 23) | (out[3] >> 9);
    out[3] -= SQRT_3 + (out[0] ^ out[1] ^ out[2]) + salt[7];

    // Partially reverse one more d step (cmp_one redoes it forward):
    //+ SQRT_3; d = (d << 9 ) | (d >> 23);
    out[3]=(out[3] << 23 ) | (out[3] >> 9);
    out[3]-=SQRT_3;

    return out;
}

// Hash-table bucketing on the (reversed) d word, at increasing widths.
static int binary_hash_0(void *binary) { return ((unsigned int*)binary)[3] & 0x0F; }
static int binary_hash_1(void *binary) { return ((unsigned int*)binary)[3] & 0xFF; }
static int binary_hash_2(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFF; }
static int binary_hash_3(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFFF; }
static int binary_hash_4(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFFFF; }
static int binary_hash_5(void *binary) { return ((unsigned int*)binary)[3] & 0x0FFFFFF; }
static int binary_hash_6(void *binary) { return ((unsigned int*)binary)[3] & 0x07FFFFFF; }

static int get_hash_0(int index) { return output1x[4*index+3] & 0x0F; }
static int get_hash_1(int index) { return output1x[4*index+3] & 0xFF; }
static int get_hash_2(int index) { return output1x[4*index+3] & 0x0FFF; }
static int get_hash_3(int index) { return output1x[4*index+3] & 0x0FFFF; }
static int get_hash_4(int index) { return output1x[4*index+3] & 0x0FFFFF; }
static int get_hash_5(int index) { return output1x[4*index+3] & 0x0FFFFFF; }
static int get_hash_6(int index) { return output1x[4*index+3] & 0x07FFFFFF; }

// First stage: full unrolled MD4 of each UTF-16LE key (= NT hash) into
// crypt_out, plus the first four round-1 steps of the second, salted MD4
// (which depend only on the NT hash) cached into last[].
// Word 15 (the high length word) is always zero, hence the commented-out
// /*+ms_buffer1x[16*i+15]*/ terms.
static void nt_hash(int count)
{
    int i;

#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, ms_buffer1x, crypt_out, last)
#endif
    for (i = 0; i < count; i++)
    {
        unsigned int a;
        unsigned int b;
        unsigned int c;
        unsigned int d;

        /* Round 1 */
        a = 0xFFFFFFFF + ms_buffer1x[16*i+0];a = (a << 3 ) | (a >> 29);
        d = INIT_D + (INIT_C ^ (a & 0x77777777)) + ms_buffer1x[16*i+1];d = (d << 7 ) | (d >> 25);
        c = INIT_C + (INIT_B ^ (d & (a ^ INIT_B)))+ ms_buffer1x[16*i+2];c = (c << 11) | (c >> 21);
        b = INIT_B + (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+3];b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+4] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+5] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+6] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+7] ;b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+8] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+9] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+10] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + ms_buffer1x[16*i+11] ;b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + ms_buffer1x[16*i+12] ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + ms_buffer1x[16*i+13] ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + ms_buffer1x[16*i+14] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a)))/*+ms_buffer1x[16*i+15]*/;b = (b << 19) | (b >> 13);

        /* Round 2 */
        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+0] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+4] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+8] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+12] + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+1] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+5] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+9] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+13] + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+2] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+6] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+10] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + ms_buffer1x[16*i+14] + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + ms_buffer1x[16*i+3] + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + ms_buffer1x[16*i+7] + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + ms_buffer1x[16*i+11] + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a))/*+ms_buffer1x[16*i+15]*/+SQRT_2; b = (b << 13) | (b >> 19);

        /* Round 3 */
        a += (b ^ c ^ d) + ms_buffer1x[16*i+0] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+8] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+4] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+12] + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + ms_buffer1x[16*i+2] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+10] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+6] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+14] + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + ms_buffer1x[16*i+1] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+9] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+5] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + ms_buffer1x[16*i+13] + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + ms_buffer1x[16*i+3] + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + ms_buffer1x[16*i+11] + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + ms_buffer1x[16*i+7] + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) /*+ ms_buffer1x[16*i+15] */+ SQRT_3; b = (b << 15) | (b >> 17);

        crypt_out[4*i+0] = a + INIT_A;
        crypt_out[4*i+1] = b + INIT_B;
        crypt_out[4*i+2] = c + INIT_C;
        crypt_out[4*i+3] = d + INIT_D;

        //Another MD4_crypt for the salt
        /* Round 1 */
        a= 0xFFFFFFFF +crypt_out[4*i+0]; a=(a<<3 )|(a>>29);
        d=INIT_D + ( INIT_C ^ ( a & 0x77777777)) +crypt_out[4*i+1]; d=(d<<7 )|(d>>25);
        c=INIT_C + ( INIT_B ^ ( d & ( a ^ INIT_B))) +crypt_out[4*i+2]; c=(c<<11)|(c>>21);
        b=INIT_B + ( a ^ ( c & ( d ^ a ))) +crypt_out[4*i+3]; b=(b<<19)|(b>>13);

        last[4*i+0]=a;
        last[4*i+1]=b;
        last[4*i+2]=c;
        last[4*i+3]=d;
    }
}

// Second stage: finish the salted MD4 for every candidate against the
// current salt. Rounds are truncated after the d step that uses
// salt_buffer[5]; cmp_one() completes the remaining steps only for
// candidates whose d word already matches.
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int i;

    // Recompute the key-dependent stage only when keys actually changed.
    if(new_key)
    {
        new_key=0;
        nt_hash(count);
    }

#if MS_NUM_KEYS > 1 && defined(_OPENMP)
#pragma omp parallel for default(none) private(i) shared(count, last, crypt_out, salt_buffer, output1x)
#endif
    for(i = 0; i < count; i++)
    {
        unsigned int a;
        unsigned int b;
        unsigned int c;
        unsigned int d;

        a = last[4*i+0];
        b = last[4*i+1];
        c = last[4*i+2];
        d = last[4*i+3];

        a += (d ^ (b & (c ^ d))) + salt_buffer[0]  ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[1]  ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[2]  ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + salt_buffer[3]  ;b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + salt_buffer[4]  ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[5]  ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[6]  ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a))) + salt_buffer[7]  ;b = (b << 19) | (b >> 13);

        a += (d ^ (b & (c ^ d))) + salt_buffer[8]  ;a = (a << 3 ) | (a >> 29);
        d += (c ^ (a & (b ^ c))) + salt_buffer[9]  ;d = (d << 7 ) | (d >> 25);
        c += (b ^ (d & (a ^ b))) + salt_buffer[10] ;c = (c << 11) | (c >> 21);
        b += (a ^ (c & (d ^ a)))/*+salt_buffer[11]*/;b = (b << 19) | (b >> 13);

        /* Round 2 */
        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+0]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[0]    + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[4]    + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[8]    + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+1]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[1]    + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[5]    + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[9]    + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+2]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[2]    + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[6]    + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a)) + salt_buffer[10]   + SQRT_2; b = (b << 13) | (b >> 19);

        a += ((b & (c | d)) | (c & d)) + crypt_out[4*i+3]  + SQRT_2; a = (a << 3 ) | (a >> 29);
        d += ((a & (b | c)) | (b & c)) + salt_buffer[3]    + SQRT_2; d = (d << 5 ) | (d >> 27);
        c += ((d & (a | b)) | (a & b)) + salt_buffer[7]    + SQRT_2; c = (c << 9 ) | (c >> 23);
        b += ((c & (d | a)) | (d & a))/*+ salt_buffer[11]*/+ SQRT_2; b = (b << 13) | (b >> 19);

        /* Round 3 */
        a += (b ^ c ^ d) + crypt_out[4*i+0]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[4]    + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + salt_buffer[0]    + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + salt_buffer[8]    + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + crypt_out[4*i+2]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        d += (a ^ b ^ c) + salt_buffer[6]    + SQRT_3; d = (d << 9 ) | (d >> 23);
        c += (d ^ a ^ b) + salt_buffer[2]    + SQRT_3; c = (c << 11) | (c >> 21);
        b += (c ^ d ^ a) + salt_buffer[10]   + SQRT_3; b = (b << 15) | (b >> 17);

        a += (b ^ c ^ d) + crypt_out[4*i+1]  + SQRT_3; a = (a << 3 ) | (a >> 29);
        // Stop early: the remaining steps are only done in cmp_one().
        d += (a ^ b ^ c) + salt_buffer[5];

        output1x[4*i+0]=a;
        output1x[4*i+1]=b;
        output1x[4*i+2]=c;
        output1x[4*i+3]=d;
    }

    return count;
}

// Quick reject: compare only the (partial) d word against all candidates.
static int cmp_all(void *binary, int count)
{
    unsigned int i=0;
    unsigned int d=((unsigned int *)binary)[3];

    for(;i<count;i++)
        if(d==output1x[i*4+3])
            return 1;

    return 0;
}

// Finish the MD4 steps crypt_all() skipped, bailing out word by word.
static int cmp_one(void * binary, int index)
{
    unsigned int *t=(unsigned int *)binary;
    unsigned int a=output1x[4*index+0];
    unsigned int b=output1x[4*index+1];
    unsigned int c=output1x[4*index+2];
    unsigned int d=output1x[4*index+3];

    if(d!=t[3])
        return 0;
    d+=SQRT_3;d = (d << 9 ) | (d >> 23);

    c += (d ^ a ^ b) + salt_buffer[1] + SQRT_3; c = (c << 11) | (c >> 21);
    if(c!=t[2])
        return 0;

    b += (c ^ d ^ a) + salt_buffer[9] + SQRT_3; b = (b << 15) | (b >> 17);
    if(b!=t[1])
        return 0;

    a += (b ^ c ^ d) + crypt_out[4*index+3]+ SQRT_3; a = (a << 3 ) | (a >> 29);
    return (a==t[0]);
}

static int cmp_exact(char *source, int index)
{
    // This check is for the unreal case of collisions.
    // It verifies that the salts are the same.
    unsigned int *salt=fmt_mscash.methods.salt(source);
    unsigned int i=0;

    for(;i<11;i++)
        if(salt[i]!=salt_buffer[i])
            return 0;
    return 1;
}

// This is common code for the SSE/MMX/generic variants of non-UTF8 set_key
// Widen an 8-bit key to UTF-16LE MD4 words (two chars per word), append
// the 0x80 padding, zero the rest of the previously-used area (tracked in
// *last_length), and store the bit length at keybuffer[lenStoreOffset].
// xBuf is the word stride (1 for the generic layout).
static inline void set_key_helper(unsigned int * keybuffer,
                                  unsigned int xBuf,
                                  const unsigned char * key,
                                  unsigned int lenStoreOffset,
                                  unsigned int *last_length)
{
    unsigned int i=0;
    unsigned int md4_size=0;
    for(; key[md4_size] && md4_size < PLAINTEXT_LENGTH; i += xBuf, md4_size++)
    {
        unsigned int temp;
        if ((temp = key[++md4_size]))
        {
            keybuffer[i] = key[md4_size-1] | (temp << 16);
        }
        else
        {
            keybuffer[i] = key[md4_size-1] | 0x800000;
            goto key_cleaning;
        }
    }
    keybuffer[i] = 0x80;

key_cleaning:
    i += xBuf;
    for(;i <= *last_length; i += xBuf)
        keybuffer[i] = 0;

    *last_length = (md4_size >> 1)+1;

    keybuffer[lenStoreOffset] = md4_size << 4;
}

static void set_key(char *_key, int index)
{
    set_key_helper(&ms_buffer1x[index << 4], 1, (unsigned char *)_key, 14,
                   &last_i[index]);
    //new password_candidate
    new_key=1;
}

// UTF-8 conversion right into key buffer
// This is common code for the SSE/MMX/generic variants
// Decodes UTF-8 two code points at a time (chl = low, chh = high half of
// each 32-bit word), emitting surrogate pairs for supplementary planes.
// On truncated/invalid input it gives up and just resets *lastlen.
static inline void set_key_helper_utf8(unsigned int * keybuffer, unsigned int xBuf,
    const UTF8 * source, unsigned int lenStoreOffset, unsigned int *lastlen)
{
    unsigned int *target = keybuffer;
    UTF32 chl, chh = 0x80;
    unsigned int outlen = 0;

    while (*source) {
        chl = *source;
        if (chl >= 0xC0) {
            unsigned int extraBytesToRead =
                opt_trailingBytesUTF8[chl & 0x3f];
            switch (extraBytesToRead) {
            case 3:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
            case 2:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
            case 1:
                ++source;
                if (*source) {
                    chl <<= 6;
                    chl += *source;
                } else {
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
            case 0:
                break;
            default:
                *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                return;
            }
            chl -= offsetsFromUTF8[extraBytesToRead];
        }
        source++;
        outlen++;
        if (chl > UNI_MAX_BMP) {
            if (outlen == PLAINTEXT_LENGTH) {
                chh = 0x80;
                *target = (chh << 16) | chl;
                target += xBuf;
                *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                break;
            }
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START  (UTF32)0xD800
#define UNI_SUR_LOW_START   (UTF32)0xDC00
            chl -= halfBase;
            chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);;
            chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
            outlen++;
        } else if (*source && outlen < PLAINTEXT_LENGTH) {
            chh = *source;
            if (chh >= 0xC0) {
                unsigned int extraBytesToRead =
                    opt_trailingBytesUTF8[chh & 0x3f];
                switch (extraBytesToRead) {
                case 3:
                    ++source;
                    if (*source) {
                        // NOTE(review): this updates chl, unlike cases 2/1
                        // below which update chh — looks like a copy-paste
                        // slip for 4-byte sequences; confirm upstream.
                        chl <<= 6;
                        chl += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                case 2:
                    ++source;
                    if (*source) {
                        chh <<= 6;
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                case 1:
                    ++source;
                    if (*source) {
                        chh <<= 6;
                        chh += *source;
                    } else {
                        *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                        return;
                    }
                case 0:
                    break;
                default:
                    *lastlen = ((PLAINTEXT_LENGTH >> 1) + 1) * xBuf;
                    return;
                }
                chh -= offsetsFromUTF8[extraBytesToRead];
            }
            source++;
            outlen++;
        } else {
            chh = 0x80;
            *target = chh << 16 | chl;
            target += xBuf;
            break;
        }
        *target = chh << 16 | chl;
        target += xBuf;
    }
    if (chh != 0x80 || outlen == 0) {
        *target = 0x80;
        target += xBuf;
    }
    while(target < &keybuffer[*lastlen]) {
        *target = 0;
        target += xBuf;
    }
    *lastlen = ((outlen >> 1) + 1) * xBuf;
    keybuffer[lenStoreOffset] = outlen << 4;
}

static void set_key_utf8(char *_key, int index)
{
    set_key_helper_utf8(&ms_buffer1x[index << 4], 1, (UTF8 *)_key, 14,
                    &last_i[index]);
    //new password_candidate
    new_key=1;
}

// This is common code for the SSE/MMX/generic variants of non-UTF8 non-ISO-8859-1 set_key
static inline void set_key_helper_encoding(unsigned int * keybuffer,
                                           unsigned int xBuf,
                                           const unsigned char * key,
                                           unsigned int lenStoreOffset,
                                           unsigned int
*last_length) { unsigned int i=0; int md4_size; md4_size = enc_to_utf16( (UTF16 *)keybuffer, PLAINTEXT_LENGTH, (UTF8 *) key, strlen((char*)key)); if (md4_size < 0) md4_size = strlen16((UTF16 *)keybuffer); #if ARCH_LITTLE_ENDIAN ((UTF16*)keybuffer)[md4_size] = 0x80; #else ((UTF16*)keybuffer)[md4_size] = 0x8000; #endif ((UTF16*)keybuffer)[md4_size+1] = 0; #if !ARCH_LITTLE_ENDIAN ((UTF16*)keybuffer)[md4_size+2] = 0; #endif i = md4_size>>1; i += xBuf; for(;i <= *last_length; i += xBuf) keybuffer[i] = 0; #if !ARCH_LITTLE_ENDIAN swap(keybuffer, (md4_size>>1)+1); #endif *last_length = (md4_size >> 1) + 1; keybuffer[lenStoreOffset] = md4_size << 4; } static void set_key_encoding(char *_key, int index) { set_key_helper_encoding(&ms_buffer1x[index << 4], 1, (unsigned char *)_key, 14, &last_i[index]); //new password_candidate new_key=1; } // Get the key back from the key buffer, from UCS-2 LE static char *get_key(int index) { static union { UTF16 u16[PLAINTEXT_LENGTH + 1]; unsigned int u32[(PLAINTEXT_LENGTH + 1 + 1) / 2]; } key; unsigned int * keybuffer = &ms_buffer1x[index << 4]; unsigned int md4_size; unsigned int i=0; int len = keybuffer[14] >> 4; for(md4_size = 0; md4_size < len; i++, md4_size += 2) { #if ARCH_LITTLE_ENDIAN key.u16[md4_size] = keybuffer[i]; key.u16[md4_size+1] = keybuffer[i] >> 16; #else key.u16[md4_size] = keybuffer[i] >> 16; key.u16[md4_size+1] = keybuffer[i]; #endif } #if !ARCH_LITTLE_ENDIAN swap(key.u32, md4_size >> 1); #endif key.u16[len] = 0x00; return (char *)utf16_to_enc(key.u16); } // Public domain hash function by DJ Bernstein (salt is a username) static int salt_hash(void *salt) { UTF16 *s = salt; unsigned int hash = 5381; while (*s != 0x80) hash = ((hash << 5) + hash) ^ *s++; return hash & (SALT_HASH_SIZE - 1); } struct fmt_main fmt_mscash = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | 
FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, prepare, valid, ms_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { binary_hash_0, binary_hash_1, binary_hash_2, binary_hash_3, binary_hash_4, binary_hash_5, binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
/* ======================= attribute.c (ImageMagick MagickCore) ======================= */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. 
*/ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char 
*artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. 
*/ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[3], zero; RectangleInfo bounds; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); 
GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, 
"GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. % % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_coordinates,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_coordinates: the number of coordinates in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; register ssize_t i; /* Most dominant color of edges/corners is the background color of the image. 
*/ artifact=GetImageArtifact(image,"convex-hull:background-color"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; edge_geometry.width=0; edge_geometry.height=1; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } edge_census=(-1.0); for (i=0; i < 4; i++) if (census[i] > 
edge_census) { edge_background=background[i]; edge_census=census[i]; } return(edge_background); } void TraceConvexHull(PointInfo *coordinates,size_t number_coordinates, PointInfo ***monotone_chain,size_t *chain_length) { PointInfo **chain; register ssize_t i; size_t demark, n; /* Construct the upper and lower hulls: rightmost to leftmost counterclockwise. */ chain=(*monotone_chain); n=0; for (i=0; i < (ssize_t) number_coordinates; i++) { while ((n >= 2) && (LexicographicalOrder(chain[n-2],chain[n-1],&coordinates[i]) <= 0.0)) n--; chain[n++]=(&coordinates[i]); } demark=n+1; for (i=(ssize_t) number_coordinates-2; i >= 0; i--) { while ((n >= demark) && (LexicographicalOrder(chain[n-2],chain[n-1],&coordinates[i]) <= 0.0)) n--; chain[n++]=(&coordinates[i]); } *chain_length=n; } MagickExport PointInfo *GetImageConvexHull(const Image *image, size_t *number_coordinates,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MemoryInfo *coordinate_info; PixelInfo background; PointInfo *convex_hull, *coordinates, **monotone_chain; size_t n; ssize_t y; /* Identify convex hull coordinates of image foreground object(s). 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_coordinates=0; coordinate_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(*coordinates)); monotone_chain=(PointInfo **) AcquireQuantumMemory(2*image->columns,2* image->rows*sizeof(*monotone_chain)); if ((coordinate_info == (MemoryInfo *) NULL) || (monotone_chain == (PointInfo **) NULL)) { if (monotone_chain != (PointInfo **) NULL) monotone_chain=(PointInfo **) RelinquishMagickMemory(monotone_chain); if (coordinate_info != (MemoryInfo *) NULL) coordinate_info=RelinquishVirtualMemory(coordinate_info); return((PointInfo *) NULL); } coordinates=(PointInfo *) GetVirtualMemoryBlob(coordinate_info); image_view=AcquireVirtualCacheView(image,exception); background=GetEdgeBackgroundColor(image,image_view,exception); status=MagickTrue; n=0; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) { coordinates[n].x=(double) x; coordinates[n].y=(double) y; n++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Return the convex hull of the image foreground object(s). 
*/ TraceConvexHull(coordinates,n,&monotone_chain,number_coordinates); convex_hull=(PointInfo *) AcquireQuantumMemory(*number_coordinates, sizeof(*convex_hull)); if (convex_hull != (PointInfo *) NULL) for (n=0; n < *number_coordinates; n++) convex_hull[n]=(*monotone_chain[n]); monotone_chain=(PointInfo **) RelinquishMagickMemory(monotone_chain); coordinate_info=RelinquishVirtualMemory(coordinate_info); return(convex_hull); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. 
% */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, 
green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange.  Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a prior gray/bi-level classification; only sRGB-compatible
    colorspaces can be inspected pixel-by-pixel.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /*
    Assume bi-level; demote to grayscale, or give up entirely, as pixels
    disprove the assumption.
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and
blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  register ssize_t
    x;

  register const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a prior bi-level classification; only sRGB-compatible colorspaces
    can be inspected pixel-by-pixel.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Assume monochrome; the first non-monochrome pixel ends the scan.
  */
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
%     (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
%
ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Classify by inspecting the pixels: CMYK first, then monochrome, gray,
    palette, and finally true color; the alpha trait selects the ...Alpha
    variant of each type.
  */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(GrayscaleAlphaType);
      return(GrayscaleType);
    }
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageGray() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
% A description of each parameter follows:
%
%   o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Pure metadata test: only the cached image->type is consulted, pixels
    are not scanned (use IdentifyImageGray() for that).
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e M o n o c h r o m e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
% A description of each parameter follows:
%
%   o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Pure metadata test, like IsImageGray() above.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageOpaque() returns MagickTrue if none of the pixels in the image have
% an alpha value other than OpaqueAlpha (QuantumRange).
%
% Will return true immediately if the alpha channel is not available.
%
% The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const Quantum
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    /*
      An early exit from the inner loop marks the first translucent pixel;
      the same trick (y short of image->rows) drives the return value.
    */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() sets the depth of the image.
%
% The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o channel: the channel.
%
%   o depth: the image depth.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    At or beyond the build's quantum depth nothing can be quantized away:
    record the depth and return.
  */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Quantize the colormap entries of a palette image in place.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%        OptimizeType
%
% The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o type: Image type.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /*
    A per-image "dither" artifact overrides the image's dither setting for
    the quantization steps below.
  */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /*
        Normalize, then quantize to a 2-color gray palette.
        NOTE(review): the TransformImageColorspace() result is overwritten
        by QuantizeImage() below — confirm that is intentional.
      */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      /*
        Quantize only when the image does not already fit in a 256-entry
        colormap.
      */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /*
        Threshold just the alpha channel to on/off, then quantize the
        colors.
      */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /*
    Record the new type only after every transformation succeeded.
  */
  image->type=type;
  return(MagickTrue);
}
coro_preconditioner.h
/** * @author : Zhao Chonyyao (cyzhao@zju.edu.cn) * @date : 2021-04-30 * @description: precoditioner for co-rotational solver * @version : 1.0 */ #ifndef CORO_PRECONDITIONER #define CORO_PRECONDITIONER #include <Eigen/SparseCholesky> #include "Common/search_eigenvalues.h" #include "Common/diag_BCSR.h" #include "Model/fem/elas_energy.h" #include "preconditioner.h" template <typename T> using ldlt_type = Eigen::SimplicialLDLT<SPM_C<T>>; template <typename T> using ldlt_ptr = std::shared_ptr<ldlt_type<T>>; template <typename T> using elas_intf_ptr = std::shared_ptr<PhysIKA::elas_intf<T, 3>>; namespace PhysIKA { /** * corotated preconditioner class * */ template <typename T> class coro_preconditioner : public preconditioner<T> { public: coro_preconditioner(const ldlt_ptr<T>& fac_hes_rest, const std::shared_ptr<diag_BCSR<T, 3>>& R) : fac_hes_rest_(fac_hes_rest), R_(R), RT_(R_->transpose()) {} /*use R(x) A_bar R(x)T to approximate A(x), where A_bar is the hessian at the rest shape, R(x) is the assembled rotation matrix for each vertex.*/ VEC<T> operator()(const VEC<T>& r) const { VEC<T> RT_r = RT_ * r; VEC<T> A_RT_r = fac_hes_rest_->solve(RT_r); return (*R_) * A_RT_r; }; protected: const ldlt_ptr<T> fac_hes_rest_; const std::shared_ptr<diag_BCSR<T, 3>> R_; const diag_BCSR<T, 3> RT_; }; /** * factory for generating coro preconditioner * */ template <typename T> class coro_precond_fac { public: coro_precond_fac(const ldlt_ptr<T>& fac_hes_rest) : fac_hes_rest_(fac_hes_rest), R_(std::make_shared<diag_BCSR<T, 3>>()) { exit_if(fac_hes_rest_->info() != Eigen::Success, "Rest hessian can not perform LDLT."); } std::shared_ptr<coro_preconditioner<T>> build_preconditioner() const { exit_if(R_->size() == 0 || fac_hes_rest_ == nullptr); return std::make_shared<coro_preconditioner<T>>(fac_hes_rest_, R_); } std::shared_ptr<diag_BCSR<T, 3>> get_R() const { return R_; } private: const ldlt_ptr<T> fac_hes_rest_; protected: std::shared_ptr<diag_BCSR<T, 3>> R_; VEC_MAT<MAT3<T>> R_per_nod_; 
}; /** * geometry multigrid based coro preconditioner factory. * */ template <typename T> class gmg_coro_precond_fac : public coro_precond_fac<T> { public: gmg_coro_precond_fac(const ldlt_ptr<T>& fac_hes_rest, const elas_intf_ptr<T>& elas) : coro_precond_fac<T>(fac_hes_rest), elas_(elas) {} int opt_R(const T* x) { IF_ERR(return, elas_->aver_ele_R(x, this->R_per_nod_)); this->R_->setFromDiagMat(this->R_per_nod_); return 0; } private: const elas_intf_ptr<T> elas_; }; /** * algebraic multigrid based coro preconditioner factory. * */ template <typename T> class amg_coro_precond_fac : public coro_precond_fac<T> { public: //TODO: maybe setting A_bar_diag as argument is better than get_block_diagonal each time amg_coro_precond_fac(const ldlt_ptr<T>& fac_hes_rest, const SPM_R<T>& hes_rest) : coro_precond_fac<T>(fac_hes_rest), A_bar_diag_(get_block_diagonal(hes_rest)) {} int opt_R(const SPM_R<T>& A) { IF_ERR(return, minimize_norm_of_diff(A)); this->R_->setFromDiagMat(this->R_per_nod_); return 0; } private: VEC_MAT<MAT3<T>> A_diag_; const VEC_MAT<MAT3<T>> A_bar_diag_; int minimize_norm_of_diff(const SPM_R<T>& A) { A_diag_ = get_block_diagonal(A); this->R_per_nod_.resize(A_diag_.size()); const size_t size = A_diag_.size(); #pragma omp parallel for for (size_t i = 0; i < size; ++i) { MAT3<T> A_i = A_diag_[i]; MAT3<T> A_bar_i = A_bar_diag_[i]; MAT3<T> U_i = A_i; VEC3<T> E_i = VEC3<T>::Zero(); eig_jac(A_bar_i, U_i, E_i); MAT3<T> U_bar_i; VEC3<T> E_bar_i; eig_jac(A_bar_i, U_bar_i, E_bar_i); this->R_per_nod_[i] = U_i * U_bar_i.transpose(); } return 0; } }; } // namespace PhysIKA #endif
y_solve.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB SP code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" //--------------------------------------------------------------------- // this function performs the solution of the approximate factorization // step in the y-direction for all five matrix components // simultaneously. The Thomas algorithm is employed to solve the // systems for the y-lines. 
// Boundary conditions are non-periodic
//---------------------------------------------------------------------
void y_solve()
{
  int i, j, k, j1, j2, m;
  double ru1, fac1, fac2;

  //kai
  // int k14;
  // consistent_data(&k14, "int", 1);

  if (timeron) timer_start(t_ysolve);
  // Each k-plane is an independent pentadiagonal solve along y.
#pragma omp parallel for default(shared) private(i,j,k,j1,j2,m, \
    ru1,fac1,fac2)
  for (k = 1; k <= nz2; k++) {
    lhsinitj(ny2+1, nx2);

    //---------------------------------------------------------------------
    // Computes the left hand side for the three y-factors
    //---------------------------------------------------------------------

    //---------------------------------------------------------------------
    // first fill the lhs for the u-eigenvalue
    //---------------------------------------------------------------------
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (j = 0; j <= grid_points[1]-1; j++) {
        ru1 = c3c4*rho_i[k][j][i];
        cv[j] = vs[k][j][i];
        rhoq[j] = max(max(dy3+con43*ru1, dy5+c1c5*ru1), max(dymax+ru1, dy1));
      }
      for (j = 1; j <= grid_points[1]-2; j++) {
        lhs[j][i][0] = 0.0;
        lhs[j][i][1] = -dtty2 * cv[j-1] - dtty1 * rhoq[j-1];
        lhs[j][i][2] = 1.0 + c2dtty1 * rhoq[j];
        lhs[j][i][3] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1];
        lhs[j][i][4] = 0.0;
      }
    }

    //---------------------------------------------------------------------
    // add fourth order dissipation
    //---------------------------------------------------------------------
    for (i = 1; i <= grid_points[0]-2; i++) {
      j = 1;
      lhs[j][i][2] = lhs[j][i][2] + comz5;
      lhs[j][i][3] = lhs[j][i][3] - comz4;
      lhs[j][i][4] = lhs[j][i][4] + comz1;

      lhs[j+1][i][1] = lhs[j+1][i][1] - comz4;
      lhs[j+1][i][2] = lhs[j+1][i][2] + comz6;
      lhs[j+1][i][3] = lhs[j+1][i][3] - comz4;
      lhs[j+1][i][4] = lhs[j+1][i][4] + comz1;
    }

    for (j = 3; j <= grid_points[1]-4; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        lhs[j][i][0] = lhs[j][i][0] + comz1;
        lhs[j][i][1] = lhs[j][i][1] - comz4;
        lhs[j][i][2] = lhs[j][i][2] + comz6;
        lhs[j][i][3] = lhs[j][i][3] - comz4;
        lhs[j][i][4] = lhs[j][i][4] + comz1;
      }
    }

    for (i = 1; i <= grid_points[0]-2; i++) {
      j = grid_points[1]-3;
      lhs[j][i][0] = lhs[j][i][0] + comz1;
      lhs[j][i][1] = lhs[j][i][1] - comz4;
      lhs[j][i][2] = lhs[j][i][2] + comz6;
      lhs[j][i][3] = lhs[j][i][3] - comz4;

      lhs[j+1][i][0] = lhs[j+1][i][0] + comz1;
      lhs[j+1][i][1] = lhs[j+1][i][1] - comz4;
      lhs[j+1][i][2] = lhs[j+1][i][2] + comz5;
    }

    //---------------------------------------------------------------------
    // subsequently, for the other two factors
    //---------------------------------------------------------------------
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        lhsp[j][i][0] = lhs[j][i][0];
        lhsp[j][i][1] = lhs[j][i][1] - dtty2 * speed[k][j-1][i];
        lhsp[j][i][2] = lhs[j][i][2];
        lhsp[j][i][3] = lhs[j][i][3] + dtty2 * speed[k][j+1][i];
        lhsp[j][i][4] = lhs[j][i][4];
        lhsm[j][i][0] = lhs[j][i][0];
        lhsm[j][i][1] = lhs[j][i][1] + dtty2 * speed[k][j-1][i];
        lhsm[j][i][2] = lhs[j][i][2];
        lhsm[j][i][3] = lhs[j][i][3] - dtty2 * speed[k][j+1][i];
        lhsm[j][i][4] = lhs[j][i][4];
      }
    }

    //---------------------------------------------------------------------
    // FORWARD ELIMINATION
    //---------------------------------------------------------------------
    for (j = 0; j <= grid_points[1]-3; j++) {
      j1 = j + 1;
      j2 = j + 2;
      for (i = 1; i <= grid_points[0]-2; i++) {
        fac1 = 1.0/lhs[j][i][2];
        lhs[j][i][3] = fac1*lhs[j][i][3];
        lhs[j][i][4] = fac1*lhs[j][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        }
        lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1]*lhs[j][i][3];
        lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1]*lhs[j][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1]*rhs[k][j][i][m];
        }
        lhs[j2][i][1] = lhs[j2][i][1] - lhs[j2][i][0]*lhs[j][i][3];
        lhs[j2][i][2] = lhs[j2][i][2] - lhs[j2][i][0]*lhs[j][i][4];
        for (m = 0; m < 3; m++) {
          rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhs[j2][i][0]*rhs[k][j][i][m];
        }
      }
    }

    //---------------------------------------------------------------------
    // The last two rows in this grid block are a bit different,
    // since they do not have two more rows available for the
    // elimination of off-diagonal entries
    //---------------------------------------------------------------------
    j  = grid_points[1]-2;
    j1 = grid_points[1]-1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      fac1 = 1.0/lhs[j][i][2];
      lhs[j][i][3] = fac1*lhs[j][i][3];
      lhs[j][i][4] = fac1*lhs[j][i][4];
      for (m = 0; m < 3; m++) {
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      }
      lhs[j1][i][2] = lhs[j1][i][2] - lhs[j1][i][1]*lhs[j][i][3];
      lhs[j1][i][3] = lhs[j1][i][3] - lhs[j1][i][1]*lhs[j][i][4];
      for (m = 0; m < 3; m++) {
        rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhs[j1][i][1]*rhs[k][j][i][m];
      }
      //---------------------------------------------------------------------
      // scale the last row immediately
      //---------------------------------------------------------------------
      fac2 = 1.0/lhs[j1][i][2];
      for (m = 0; m < 3; m++) {
        rhs[k][j1][i][m] = fac2*rhs[k][j1][i][m];
      }
    }

    //---------------------------------------------------------------------
    // for the u+c and the u-c factors
    //---------------------------------------------------------------------
    for (j = 0; j <= grid_points[1]-3; j++) {
      j1 = j + 1;
      j2 = j + 2;
      for (i = 1; i <= grid_points[0]-2; i++) {
        m = 3;
        fac1 = 1.0/lhsp[j][i][2];
        lhsp[j][i][3] = fac1*lhsp[j][i][3];
        lhsp[j][i][4] = fac1*lhsp[j][i][4];
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1]*lhsp[j][i][3];
        lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1]*lhsp[j][i][4];
        rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1]*rhs[k][j][i][m];
        lhsp[j2][i][1] = lhsp[j2][i][1] - lhsp[j2][i][0]*lhsp[j][i][3];
        lhsp[j2][i][2] = lhsp[j2][i][2] - lhsp[j2][i][0]*lhsp[j][i][4];
        rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsp[j2][i][0]*rhs[k][j][i][m];

        m = 4;
        fac1 = 1.0/lhsm[j][i][2];
        lhsm[j][i][3] = fac1*lhsm[j][i][3];
        lhsm[j][i][4] = fac1*lhsm[j][i][4];
        rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
        lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1]*lhsm[j][i][3];
        lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1]*lhsm[j][i][4];
        rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1]*rhs[k][j][i][m];
        lhsm[j2][i][1] = lhsm[j2][i][1] - lhsm[j2][i][0]*lhsm[j][i][3];
        lhsm[j2][i][2] = lhsm[j2][i][2] - lhsm[j2][i][0]*lhsm[j][i][4];
        rhs[k][j2][i][m] = rhs[k][j2][i][m] - lhsm[j2][i][0]*rhs[k][j][i][m];
      }
    }

    //---------------------------------------------------------------------
    // And again the last two rows separately
    //---------------------------------------------------------------------
    j  = grid_points[1]-2;
    j1 = grid_points[1]-1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      m = 3;
      fac1 = 1.0/lhsp[j][i][2];
      lhsp[j][i][3] = fac1*lhsp[j][i][3];
      lhsp[j][i][4] = fac1*lhsp[j][i][4];
      rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      lhsp[j1][i][2] = lhsp[j1][i][2] - lhsp[j1][i][1]*lhsp[j][i][3];
      lhsp[j1][i][3] = lhsp[j1][i][3] - lhsp[j1][i][1]*lhsp[j][i][4];
      rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsp[j1][i][1]*rhs[k][j][i][m];

      m = 4;
      fac1 = 1.0/lhsm[j][i][2];
      lhsm[j][i][3] = fac1*lhsm[j][i][3];
      lhsm[j][i][4] = fac1*lhsm[j][i][4];
      rhs[k][j][i][m] = fac1*rhs[k][j][i][m];
      lhsm[j1][i][2] = lhsm[j1][i][2] - lhsm[j1][i][1]*lhsm[j][i][3];
      lhsm[j1][i][3] = lhsm[j1][i][3] - lhsm[j1][i][1]*lhsm[j][i][4];
      rhs[k][j1][i][m] = rhs[k][j1][i][m] - lhsm[j1][i][1]*rhs[k][j][i][m];

      //---------------------------------------------------------------------
      // Scale the last row immediately
      //---------------------------------------------------------------------
      rhs[k][j1][i][3] = rhs[k][j1][i][3]/lhsp[j1][i][2];
      rhs[k][j1][i][4] = rhs[k][j1][i][4]/lhsm[j1][i][2];
    }

    //---------------------------------------------------------------------
    // BACKSUBSTITUTION
    //---------------------------------------------------------------------
    j  = grid_points[1]-2;
    j1 = grid_points[1]-1;
    for (i = 1; i <= grid_points[0]-2; i++) {
      for (m = 0; m < 3; m++) {
        rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[j][i][3]*rhs[k][j1][i][m];
      }
      rhs[k][j][i][3] = rhs[k][j][i][3] - lhsp[j][i][3]*rhs[k][j1][i][3];
      rhs[k][j][i][4] = rhs[k][j][i][4] - lhsm[j][i][3]*rhs[k][j1][i][4];
    }

    //---------------------------------------------------------------------
    // The first three factors
    //---------------------------------------------------------------------
    for (j = grid_points[1]-3; j >= 0; j--) {
      j1 = j + 1;
      j2 = j + 2;
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 3; m++) {
          rhs[k][j][i][m] = rhs[k][j][i][m] -
            lhs[j][i][3]*rhs[k][j1][i][m] -
            lhs[j][i][4]*rhs[k][j2][i][m];
        }

        //-------------------------------------------------------------------
        // And the remaining two
        //-------------------------------------------------------------------
        rhs[k][j][i][3] = rhs[k][j][i][3] -
          lhsp[j][i][3]*rhs[k][j1][i][3] -
          lhsp[j][i][4]*rhs[k][j2][i][3];
        rhs[k][j][i][4] = rhs[k][j][i][4] -
          lhsm[j][i][3]*rhs[k][j1][i][4] -
          lhsm[j][i][4]*rhs[k][j2][i][4];
      }
    }

    //kai k14 = k;
    // NOTE(review): the "int k14" declaration above is commented out, so
    // this instrumentation assignment cannot be enabled as-is — confirm it
    // is meant to stay disabled.
  }
  if (timeron) timer_stop(t_ysolve);
  pinvr();
}
HelloOMP.c
#include <stdio.h>
#include <omp.h>

/*
 * Minimal OpenMP demo: every thread of the parallel region prints a
 * greeting tagged with its own thread number.  Output order is
 * nondeterministic (whichever thread reaches printf first).
 */
int main(void)
{
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        printf("(%d:!!!Hello world!!!)", tid);
    }
    return 0;
}
applu_share.h
/* Shared global state for the LU benchmark (RCCE / OpenMP port).
 * Every symbol here is defined in exactly one .c file; this header only
 * declares them so all translation units see the same globals.
 * NOTE(review): isiz1/isiz2/isiz3 are presumably compile-time constants
 * from npbparams.h -- confirm, since they size the arrays below. */
#include "npbparams.h"
#include "applu_protos.h"
#include "RCCE.h"

/* Field arrays: solution u, steady-state residual rsd, forcing term frct,
 * and a flux scratch array (flattened 4-D: 5 components x padded grid). */
extern double u[5*(isiz1+4)*(isiz2+4)*isiz3],
rsd[5*(isiz1+4)*(isiz2+4)*isiz3],
frct[5*(isiz1+4)*(isiz2+4)*isiz3],
flux[5*(isiz1+2)*(isiz2+2)*isiz3];

/* 5x5 block-Jacobian matrices used by the SSOR sweeps. */
extern double a[5*5*isiz1*isiz2],
b[5*5*isiz1*isiz2],
c[5*5*isiz1*isiz2],
d[5*5*isiz1*isiz2];

/* Time step, relaxation factor, residual tolerances/norms, error norms. */
extern double dt, omega, tolrsd[5], rsdnm[5], errnm[5], frc, ttotal;
/* Default values used to (re)initialize the tolerances and omega. */
extern double tolrsd1_def, tolrsd2_def, tolrsd3_def, tolrsd4_def, tolrsd5_def,
omega_default;
/* Coefficients of the exact solution polynomial. */
extern double ce[5*13];

/* Process topology: rank id, process count, 2-D grid shape and position. */
extern int ndim, id, num, xdim, ydim, row, col;
/* Index sub-ranges used for error/surface-integral computation. */
extern int ii1, ii2, ji1, ji2, ki1, ki2;
extern int itmax, invert;
extern int ipr, ipr_default, inorm;
/* Neighbor ranks in the 2-D process grid. */
extern int north,south,east,west;
/* Global problem size and this rank's local subdomain size. */
extern int nx0, ny0, nz0;
extern int nx, ny, nz;
/* Local loop bounds and global offsets of this rank's subdomain. */
extern int ist, iend, jst, jend, ipt, jpt;
extern int dp_type;

/* Discretization coefficients: tx/ty/tz scale factors and the dx/dy/dz
 * artificial-dissipation constants, plus flow constants c1..c5. */
extern double tx1, ty1, tz1,
dx1, dy1, dz1,
tx2, ty2, tz2,
dx2, dy2, dz2,
tx3, ty3, tz3,
dx3, dy3, dz3,
dx4, dy4, dz4,
dx5, dy5, dz5,
dssp, c1, c2, c3, c4, c5;
/* Grid spacings in the three directions. */
extern double dxi, deta, dzeta;
extern double npmax, maxtime;

/* RCCE synchronization flags and the boundary-exchange buffer. */
extern RCCE_FLAG flagsent[4], flagready[4];
extern double *buf1_exch_1;

#ifdef _OPENMP
/* Under OpenMP each thread plays the role of one RCCE "core", so all of
 * the per-rank state above must be threadprivate. */
#pragma omp threadprivate (nx, ny, nz, nx0, ny0, nz0, \
ipt, ist, iend, jpt, jst, jend, \
ii1, ii2, ji1, ji2, ki1, ki2, \
dxi, deta, dzeta, \
tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3)
#pragma omp threadprivate (dx1, dx2, dx3, dx4, dx5, \
dy1, dy2, dy3, dy4, dy5, \
dz1, dz2, dz3, dz4, dz5, \
dssp)
#pragma omp threadprivate(u, rsd, frct, flux)
#pragma omp threadprivate(ipr, inorm)
#pragma omp threadprivate(itmax, invert, \
dt, omega, tolrsd, rsdnm, errnm, frc, ttotal, \
a, b, c, d)
#pragma omp threadprivate(ce)
#pragma omp threadprivate (id, ndim, num, xdim, ydim, row, col, \
north,south,east,west, flagsent, flagready, \
buf1_exch_1, npmax, maxtime)
#endif
morn_wave.c
/*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_wave.h"

/* Private per-object state attached to an MWave via the handle mechanism:
 * owns the backing memory and the per-channel data pointers. */
struct HandleWaveCreate
{
    MWave *wave;
    MChain *property;
    int channel;                      /* channels currently backed by memory   */
    int size;                         /* allocated samples per channel (0 = none) */
    float *index[MORN_MAX_WAVE_CN];   /* raw per-channel buffers               */
    MMemory *memory;                  /* owning allocation, NULL if user data  */
};

/* Handle destructor: releases owned resources and zeroes the MWave shell. */
void endWaveCreate(struct HandleWaveCreate *handle)
{
    mException((handle->wave == NULL),EXIT,"invalid wave");
    if(handle->property!=NULL) mChainRelease(handle->property);
    if(handle->memory != NULL) mMemoryRelease(handle->memory);
    memset(handle->wave,0,sizeof(MWave));
    // mFree(((MList **)(handle->wave))-1);
}
#define HASH_WaveCreate 0xa08b9c64

/* Create a wave with cn channels of `size` samples.
 * data==NULL  -> library allocates (with 32 samples of head/tail padding,
 *                data pointers offset by 16 into each buffer);
 * data!=NULL  -> the caller's per-channel pointers are adopted (not copied). */
MWave *mWaveCreate(int cn,int size,float **data)
{
    MWave *wave = (MWave *)ObjectAlloc(sizeof(MWave));
    MHandle *hdl = mHandle(wave,WaveCreate);
    struct HandleWaveCreate *handle = (struct HandleWaveCreate *)(hdl->handle);
    handle->wave = wave;

    if(size <0) size = 0;
    if(cn <0) cn = 0;
    mException((cn>MORN_MAX_WAVE_CN),EXIT,"invalid input");
    wave->size = size;
    wave->channel = cn;

    if((size == 0)||(cn==0))
    {
        /* Empty wave: providing data for a zero-sized wave is an error. */
        mException((!INVALID_POINTER(data)),EXIT,"invalid input");
        memset(wave->data,0,MORN_MAX_WAVE_CN*sizeof(float *));
    }
    else if(INVALID_POINTER(data))
    {
        size = size +32;   /* padding so callers may read slightly out of range */
        void **index[MORN_MAX_WAVE_CN];
        for(int i=0;i<cn;i++) index[i]=(void **)(&(handle->index[i]));
        if(handle->memory == NULL) handle->memory = mMemoryCreate(cn,size*sizeof(float),MORN_HOST);
        mMemoryIndex(handle->memory,1,size*sizeof(float),index,cn);
        handle->size = size;
        handle->channel = cn;
        for(int k=0;k<cn;k++) wave->data[k] = handle->index[k]+16;
    }
    else memcpy(wave->data,data,sizeof(float *)*cn);
    return wave;
}

/* Destroy a wave created by mWaveCreate (frees handles via ObjectFree). */
void mWaveRelease(MWave *wave)
{
    ObjectFree(wave);
}

/* Resize/reshape an existing wave in place, reusing its allocation when the
 * new (cn,size) fits, re-allocating otherwise.  data semantics mirror
 * mWaveCreate; data==src->data means "keep the current buffers". */
void mWaveRedefine(MWave *src,int cn,int size,float **data)
{
    mException((INVALID_POINTER(src)),EXIT,"invalid input");
    if(size <= 0) size = src->size;
    if(cn <= 0) cn = src->channel;
    if((cn!=src->channel)||(size!=src->size)) mHandleReset(src);

    int same_size = ((size <= src->size)&&(cn <= src->channel));
    int reuse = (data==src->data);
    int flag = (src->size)&&(src->channel);
    src->size = size;
    src->channel = cn;
    if(same_size&&reuse) return;

    struct HandleWaveCreate *handle = (struct HandleWaveCreate *)(ObjHandle(src,0)->handle);
    if(same_size&&(data==NULL)&&(handle->size >0)) return;
    mException(reuse&&flag&&(handle->size==0),EXIT,"invalid redefine");
    mException((cn>MORN_MAX_WAVE_CN),EXIT,"invalid input");

    handle->size = 0;
    if((cn<=0)||(size<=0))
    {
        mException((data!=NULL)&&(!reuse),EXIT,"invalid input");
        memset(src->data,0,MORN_MAX_WAVE_CN*sizeof(float *));
        return;
    }
    if(reuse) data=NULL;
    if(data!=NULL) {memcpy(src->data,data,cn*sizeof(float *));return;}

    if((size > handle->size)||(cn > handle->channel))
    {
        size = size +32;
        void **index[MORN_MAX_WAVE_CN];
        for(int i=0;i<cn;i++) index[i]=(void **)(&(handle->index[i]));
        if(handle->memory == NULL) handle->memory = mMemoryCreate(cn,size*sizeof(float),MORN_HOST);
        else mMemoryRedefine(handle->memory,cn,size*sizeof(float),MORN_HOST);
        mMemoryIndex(handle->memory,1,size*sizeof(float),index,cn);
        handle->size = size;
        handle->channel = cn;
    }
    for(int k=0;k<cn;k++) src->data[k] = handle->index[k]+16;
}

/* Copy `size` samples starting at `locate` from src into dst (all channels).
 * dst==NULL cuts in place; negative size means "to the end" (or dst's size). */
void mWaveCut(MWave *src,MWave *dst,int locate,int size)
{
    int cn;
    mException(INVALID_WAVE(src),EXIT,"invalid input");
    if(locate < 0) locate = 0;
    if(size < 0)
    {
        if(!INVALID_WAVE(dst)) size = dst->size;
        else size = src->size -locate;
    }
    if(INVALID_POINTER(dst)) dst = src;
    mException((locate+size>src->size),EXIT,"invalid input");
    if((locate == 0)&&(size == src->size)&&(dst==src)) return;
    if(dst != src)
    {
        mWaveRedefine(dst,src->channel,size,dst->data);
        // dst->info = src->info;
    }
    for(cn=0;cn<src->channel;cn++)
        memcpy(dst->data[cn],src->data[cn]+locate,size*sizeof(float));
}

/* Per-channel arithmetic mean: mean[j] = average of channel j. */
void mWavMean(MWave *src,float *mean)
{
    int wav_size;
    int i,j;
    float sum;
    mException((INVALID_WAVE(src))||(INVALID_POINTER(mean)),EXIT,"invalid input");
    wav_size = src->size;
    for(j=0;j<src->channel;j++)
    {
        /* bug fix: sum must restart at 0 for every channel; previously it
         * carried over, so channel j's "mean" included channels 0..j-1. */
        sum = 0.0f;
        for(i=0;i<wav_size;i++) sum = sum + src->data[j][i];
        mean[j] = sum/((float)wav_size);
    }
}

/* Per-channel mean of absolute values. */
void mWavABSMean(MWave *src,float *mean)
{
    int wav_size;
    int i,j;
    float sum;
    float **data;
    mException((INVALID_WAVE(src))||(INVALID_POINTER(mean)),EXIT,"invalid input");
    data = src->data;
    wav_size = src->size;
    for(j=0;j<src->channel;j++)
    {
        /* bug fix: reset the accumulator per channel (see mWavMean). */
        sum = 0.0f;
        for(i=0;i<wav_size;i++) sum = (data[j][i]>0)?(sum+data[j][i]):(sum-data[j][i]);
        mean[j] = sum/((float)wav_size);
    }
}

/* Per-channel mean of squares (mean power). */
void mWavSquarMean(MWave *src,float *mean)
{
    int wav_size;
    int i,j;
    float sum;
    float **data;
    mException((INVALID_WAVE(src))||(INVALID_POINTER(mean)),EXIT,"invalid input");
    data = src->data;
    wav_size = src->size;
    for(j=0;j<src->channel;j++)
    {
        /* bug fix: reset the accumulator per channel (see mWavMean). */
        sum = 0.0f;
        for(i=0;i<wav_size;i++) sum = sum + data[j][i]*data[j][i];
        mean[j] = sum/((float)wav_size);
    }
}

/* dst = src1 + src2, truncated to the shorter size.  src2 may have one
 * channel, broadcast against all channels of src1. */
void mWaveAdd(MWave *src1,MWave *src2,MWave *dst)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src1))||(INVALID_WAVE(src2)),EXIT,"invalid input");
    wav_size = (src1->size<src2->size)?src1->size:src2->size;
    mException((src2->channel != src1->channel)&&(src2->channel!=1),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src1;
    // dst->info = src1->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src1->channel,wav_size,dst->data);
    /* bug fix: broadcast test was `== 0`, which can never pass the
     * INVALID_WAVE check above; every sibling routine tests `== 1`. */
    if(src2->channel == 1)
    {
        /* NOTE(review): data[cn][0] for cn>0 indexes past src2's single
         * channel; data[0][i] (as in mWaveMul) may be intended -- verify. */
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = src1->data[cn][i] + src2->data[cn][0];
    }
    else
    {
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = src1->data[cn][i] + src2->data[cn][i];
    }
}

/* dst = src1 - src2, truncated to the shorter size; single-channel src2
 * is broadcast. */
void mWaveSub(MWave *src1,MWave *src2,MWave *dst)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src1))||(INVALID_WAVE(src2)),EXIT,"invalid input");
    wav_size = (src1->size<src2->size)?src1->size:src2->size;
    mException((src2->channel != src1->channel)&&(src2->channel !=1),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src1;
    // dst->info = src1->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src1->channel,wav_size,dst->data);
    if(src2->channel == 1)
    {
        /* NOTE(review): data[cn][0] for cn>0 looks out of bounds for a
         * one-channel src2; compare mWaveMul's data[0][i] -- verify. */
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = src1->data[cn][i] - src2->data[cn][0];
    }
    else
    {
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = src1->data[cn][i] - src2->data[cn][i];
    }
}

/* dst = (src1 + src2) / 2, element-wise; single-channel src2 is broadcast. */
void mWaveAverage(MWave *src1,MWave *src2,MWave *dst)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src1))||(INVALID_WAVE(src2)),EXIT,"invalid input");
    wav_size = (src1->size<src2->size)?src1->size:src2->size;
    mException((src2->channel != src1->channel)&&(src2->channel != 1),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src1;
    // dst->info = src1->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src1->channel,wav_size,dst->data);
    if(src2->channel == 1)
    {
        /* NOTE(review): same data[cn][0] broadcast concern as mWaveSub. */
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = (src1->data[cn][i] + src2->data[cn][0])/2.0f;
    }
    else
    {
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = (src1->data[cn][i] + src2->data[cn][i])/2.0f;
    }
}

/* Weighted average of src1 and src2.  MORN_DEFAULT weights are derived from
 * the other weight (w1+w2 == 1) or fall back to mWaveAverage when both are
 * defaulted; the result is normalized by (weight1+weight2). */
void mWaveWeightedAverage(MWave *src1,MWave *src2,MWave *dst,float weight1,float weight2)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src1))||(INVALID_WAVE(src2)),EXIT,"invalid input");
    wav_size = (src1->size<src2->size)?src1->size:src2->size;
    mException((src2->channel != src1->channel)&&(src2->channel !=1),EXIT,"invalid input");

    if((weight1 == MORN_DEFAULT)&&(weight2 == MORN_DEFAULT))
    {
        mWaveAverage(src1,src2,dst);
        return;
    }
    else if((weight1 == MORN_DEFAULT)&&(weight2 < 1.0f)&&(weight2 > 0.0f)) weight1 = 1.0f - weight2;
    else if((weight2 == MORN_DEFAULT)&&(weight1 < 1.0f)&&(weight1 > 0.0f)) weight2 = 1.0f - weight1;
    else if((weight1 == MORN_DEFAULT)||(weight2 == MORN_DEFAULT)) mException(1,EXIT,"invalid input");

    if(INVALID_POINTER(dst)) dst = src1;
    // dst->info = src1->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src1->channel,wav_size,dst->data);
    if(src2->channel == 1)
    {
        /* NOTE(review): same data[cn][0] broadcast concern as mWaveSub. */
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = (src1->data[cn][i]*weight1 + src2->data[cn][0]*weight2)/(weight1+weight2);
    }
    else
    {
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = (src1->data[cn][i]*weight1 + src2->data[cn][i]*weight2)/(weight1+weight2);
    }
}

/* dst = src * k (every sample of every channel scaled by k). */
void mWaveScale(MWave *src,MWave *dst,float k)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src)),EXIT,"invalid input");
    wav_size = src->size;
    if(INVALID_POINTER(dst)) dst = src;
    // dst->info = src->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src->channel,wav_size,dst->data);
    for(cn = 0;cn<src->channel;cn++)
        for(i=0;i<wav_size;i++)
            dst->data[cn][i] = src->data[cn][i]*k;
}

/* dst = src1 * src2, element-wise; single-channel src2 is broadcast
 * sample-by-sample (data[0][i]). */
void mWaveMul(MWave *src1,MWave *src2,MWave *dst)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src1))||(INVALID_WAVE(src2)),EXIT,"invalid input");
    wav_size = (src1->size<src2->size)?src1->size:src2->size;
    mException((src2->channel != src1->channel)&&(src2->channel !=1),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src1;
    // dst->info = src1->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src1->channel,wav_size,dst->data);
    if(src2->channel == 1)
    {
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = src1->data[cn][i] * src2->data[0][i];
    }
    else
    {
        for(cn = 0;cn<src1->channel;cn++)
            for(i=0;i<wav_size;i++)
                dst->data[cn][i] = src1->data[cn][i] * src2->data[cn][i];
    }
}

/* dst = src1 / src2, element-wise.
 * NOTE(review): unlike the other binary ops this has no channel==1
 * broadcast branch although the check above permits it -- verify. */
void mWaveDiv(MWave *src1,MWave *src2,MWave *dst)
{
    int wav_size;
    int cn,i;
    mException((INVALID_WAVE(src1))||(INVALID_WAVE(src2)),EXIT,"invalid input");
    wav_size = (src1->size<src2->size)?src1->size:src2->size;
    mException((src2->channel != src1->channel)&&(src2->channel !=1),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src1;
    // dst->info = src1->info;
    float normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(float));
    mWaveRedefine(dst,src1->channel,wav_size,dst->data);
    for(cn = 0;cn<src1->channel;cn++)
        for(i=0;i<wav_size;i++)
            dst->data[cn][i] = src1->data[cn][i] / src2->data[cn][i];
}

/* Apply func(sample, para) to every sample; per-channel loops run under
 * OpenMP (the loop index of an `omp parallel for` is implicitly private). */
void mWaveOperate(MWave *src,MWave *dst,float (*func)(float,void *),void *para)
{
    int i;
    mException((INVALID_WAVE(src)),EXIT,"invalid input");
    if(INVALID_POINTER(dst)) dst = src;
    else mWaveRedefine(dst,src->channel,src->size,dst->data);
    for(int cn=0;cn<src->channel;cn++)
    {
        #pragma omp parallel for
        for(i=0;i<src->size;i++)
            dst->data[cn][i] = func(src->data[cn][i],para);
    }
}
kpoint.c
/* Copyright (C) 2008 Atsushi Togo */
/* All rights reserved. */
/* This file is part of spglib. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

/* kpoint.c: irreducible k-point mesh generation and Brillouin-zone     */
/* (BZ) relocation for spglib.  The "dense" variants use size_t index   */
/* arrays; the plain variants are thin int wrappers around them.        */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include "mathfunc.h"
#include "kpoint.h"
#include "kgrid.h"

#ifdef KPTWARNING
#include <stdio.h>
#define warning_print(...) fprintf(stderr,__VA_ARGS__)
#else
/* Warnings compile away unless KPTWARNING is defined. */
#define warning_print(...)
#endif

#define KPT_NUM_BZ_SEARCH_SPACE 125

/* All 5^3 translations with components in {0,1,2,-2,-1}, used to search */
/* for the periodic image of a grid point closest to Gamma (the BZ copy). */
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
  { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1},
  { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1},
  { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1},
  { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1},
  { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1},
  { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1},
  { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1},
  { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1},
  { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1},
  { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1},
  { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1},
  { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1},
  { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1},
  { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1},
  { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1},
  {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1},
  {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1},
  {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1},
  {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1},
  {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1},
  {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1},
  {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1},
  {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1},
  {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1},
  {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1}
};

static MatINT *get_point_group_reciprocal(const MatINT * rotations,
                                          const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                                 const double symprec,
                                                 const size_t num_q,
                                                 SPGCONST double qpoints[][3]);
static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3],
                                           size_t ir_mapping_table[],
                                           const int mesh[3],
                                           const int is_shift[3],
                                           const MatINT *rot_reciprocal);
static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3],
                                                  size_t ir_mapping_table[],
                                                  const int mesh[3],
                                                  const int is_shift[3],
                                                  const MatINT *rot_reciprocal);
static size_t
get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3],
                                        size_t ir_mapping_table[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const MatINT *rot_reciprocal);
static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3]);
static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3],
                                             size_t bz_map[],
                                             SPGCONST int grid_address[][3],
                                             const int mesh[3],
                                             SPGCONST double rec_lattice[3][3],
                                             const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
                                             const int mesh[3]);
static int check_mesh_symmetry(const int mesh[3],
                               const int is_shift[3],
                               const MatINT *rot_reciprocal);

/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' correspnds to the index of grid_point. */

/* int wrapper: computes the dense (size_t) mapping, then copies it into */
/* the caller's int table.  Returns the number of irreducible points, or */
/* 0 on allocation failure. */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
                                        int ir_mapping_table[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const MatINT *rot_reciprocal)
{
  int num_ir;
  size_t i;
  size_t *dense_ir_mapping_table;

  if ((dense_ir_mapping_table =
       (size_t*)malloc(sizeof(size_t) * mesh[0] * mesh[1] * mesh[2])) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    return 0;
  }

  num_ir = kpt_get_dense_irreducible_reciprocal_mesh(grid_address,
                                                     dense_ir_mapping_table,
                                                     mesh,
                                                     is_shift,
                                                     rot_reciprocal);
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    ir_mapping_table[i] = dense_ir_mapping_table[i];
  }
  free(dense_ir_mapping_table);
  dense_ir_mapping_table = NULL;

  return num_ir;
}

/* Fills grid_address and ir_mapping_table for the whole mesh and returns */
/* the number of irreducible k-points. */
size_t kpt_get_dense_irreducible_reciprocal_mesh(int grid_address[][3],
                                                 size_t ir_mapping_table[],
                                                 const int mesh[3],
                                                 const int is_shift[3],
                                                 const MatINT *rot_reciprocal)
{
  size_t num_ir;

  num_ir = get_dense_ir_reciprocal_mesh(grid_address,
                                        ir_mapping_table,
                                        mesh,
                                        is_shift,
                                        rot_reciprocal);

  return num_ir;
}

/* int wrapper around kpt_get_dense_stabilized_reciprocal_mesh (see the */
/* dense variant below).  Returns 0 on allocation failure. */
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
                                       int ir_mapping_table[],
                                       const int mesh[3],
                                       const int is_shift[3],
                                       const int is_time_reversal,
                                       const MatINT * rotations,
                                       const size_t num_q,
                                       SPGCONST double qpoints[][3])
{
  int num_ir;
  size_t i;
  size_t *dense_ir_mapping_table;

  if ((dense_ir_mapping_table =
       (size_t*)malloc(sizeof(size_t) * mesh[0] * mesh[1] * mesh[2])) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    return 0;
  }

  num_ir = kpt_get_dense_stabilized_reciprocal_mesh(grid_address,
                                                    dense_ir_mapping_table,
                                                    mesh,
                                                    is_shift,
                                                    is_time_reversal,
                                                    rotations,
                                                    num_q,
                                                    qpoints);
  for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
    ir_mapping_table[i] = dense_ir_mapping_table[i];
  }
  free(dense_ir_mapping_table);
  dense_ir_mapping_table = NULL;

  return num_ir;
}

/* Like the plain irreducible mesh, but the rotation group is first       */
/* restricted to operations that leave the given q-points invariant      */
/* (the "stabilizer"); tolerance scales inversely with mesh density.     */
size_t kpt_get_dense_stabilized_reciprocal_mesh(int grid_address[][3],
                                                size_t ir_mapping_table[],
                                                const int mesh[3],
                                                const int is_shift[3],
                                                const int is_time_reversal,
                                                const MatINT * rotations,
                                                const size_t num_q,
                                                SPGCONST double qpoints[][3])
{
  size_t num_ir;
  MatINT *rot_reciprocal, *rot_reciprocal_q;
  double tolerance;

  rot_reciprocal = NULL;
  rot_reciprocal_q = NULL;

  rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
  tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
  rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
                                                       tolerance,
                                                       num_q,
                                                       qpoints);

  num_ir = get_dense_ir_reciprocal_mesh(grid_address,
                                        ir_mapping_table,
                                        mesh,
                                        is_shift,
                                        rot_reciprocal_q);

  mat_free_MatINT(rot_reciprocal_q);
  rot_reciprocal_q = NULL;
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;
  return num_ir;
}

/* For one grid point, stores the grid-point index of its image under     */
/* each reciprocal rotation into rot_grid_points[0..num_rot-1].           */
void kpt_get_dense_grid_points_by_rotations(size_t rot_grid_points[],
                                            const int address_orig[3],
                                            SPGCONST int (*rot_reciprocal)[3][3],
                                            const int num_rot,
                                            const int mesh[3],
                                            const int is_shift[3])
{
  int i;
  int address_double_orig[3], address_double[3];

  /* Doubled coordinates encode half-grid shifts exactly in integers. */
  for (i = 0; i < 3; i++) {
    address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
  }
  for (i = 0; i < num_rot; i++) {
    mat_multiply_matrix_vector_i3(address_double, rot_reciprocal[i], address_double_orig);
    rot_grid_points[i] = kgd_get_dense_grid_point_double_mesh(address_double, mesh);
  }
}

/* Same as above, but the rotated images are mapped through bz_map on the */
/* doubled (2x) mesh used for BZ-relocated grids. */
void kpt_get_dense_BZ_grid_points_by_rotations(size_t rot_grid_points[],
                                               const int address_orig[3],
                                               SPGCONST int (*rot_reciprocal)[3][3],
                                               const int num_rot,
                                               const int mesh[3],
                                               const int is_shift[3],
                                               const size_t bz_map[])
{
  int i;
  int address_double_orig[3], address_double[3], bzmesh[3];

  for (i = 0; i < 3; i++) {
    bzmesh[i] = mesh[i] * 2;
    address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
  }
  for (i = 0; i < num_rot; i++) {
    mat_multiply_matrix_vector_i3(address_double, rot_reciprocal[i], address_double_orig);
    rot_grid_points[i] =
      bz_map[kgd_get_dense_grid_point_double_mesh(address_double, bzmesh)];
  }
}

/* int wrapper: dense bz_map uses the value num_bz_map as "unused" and    */
/* this wrapper translates that sentinel to -1 for the int table.         */
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
                                 int bz_map[],
                                 SPGCONST int grid_address[][3],
                                 const int mesh[3],
                                 SPGCONST double rec_lattice[3][3],
                                 const int is_shift[3])
{
  int i, num_bz_map, num_bzgp;
  size_t *dense_bz_map;

  num_bz_map = mesh[0] * mesh[1] * mesh[2] * 8;

  if ((dense_bz_map = (size_t*)malloc(sizeof(size_t) * num_bz_map)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    return 0;
  }

  num_bzgp = kpt_relocate_dense_BZ_grid_address(bz_grid_address,
                                                dense_bz_map,
                                                grid_address,
                                                mesh,
                                                rec_lattice,
                                                is_shift);

  for (i = 0; i < num_bz_map; i++) {
    if (dense_bz_map[i] == num_bz_map) {
      bz_map[i] = -1;
    } else {
      bz_map[i] = dense_bz_map[i];
    }
  }

  free(dense_bz_map);
  dense_bz_map = NULL;

  return num_bzgp;
}

size_t kpt_relocate_dense_BZ_grid_address(int bz_grid_address[][3],
                                          size_t bz_map[],
                                          SPGCONST int grid_address[][3],
                                          const int mesh[3],
                                          SPGCONST double rec_lattice[3][3],
                                          const int is_shift[3])
{
  return relocate_dense_BZ_grid_address(bz_grid_address,
                                        bz_map,
                                        grid_address,
                                        mesh,
                                        rec_lattice,
                                        is_shift);
}

MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations,
                                       const int is_time_reversal)
{
  return get_point_group_reciprocal(rotations, is_time_reversal);
}

MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                              const double symprec,
                                              const size_t num_q,
                                              SPGCONST double qpoints[][3])
{
  return get_point_group_reciprocal_with_q(rot_reciprocal,
                                           symprec,
                                           num_q,
                                           qpoints);
}

/* Return NULL if failed */
/* Builds the reciprocal-space point group: transposes of the input       */
/* rotations, optionally doubled with -I (time reversal), de-duplicated.  */
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
                                          const int is_time_reversal)
{
  int i, j, num_rot;
  MatINT *rot_reciprocal, *rot_return;
  int *unique_rot;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };

  rot_reciprocal = NULL;
  rot_return = NULL;
  unique_rot = NULL;

  if (is_time_reversal) {
    if ((rot_reciprocal = mat_alloc_MatINT(rotations->size * 2)) == NULL) {
      return NULL;
    }
  } else {
    if ((rot_reciprocal = mat_alloc_MatINT(rotations->size)) == NULL) {
      return NULL;
    }
  }

  if ((unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of unique_rot could not be allocated.");
    mat_free_MatINT(rot_reciprocal);
    rot_reciprocal = NULL;
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    unique_rot[i] = -1;
  }

  for (i = 0; i < rotations->size; i++) {
    /* Reciprocal-space action of a rotation is its transpose. */
    mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);

    if (is_time_reversal) {
      mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i],
                             inversion,
                             rot_reciprocal->mat[i]);
    }
  }

  num_rot = 0;
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_rot; j++) {
      if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]],
                                       rot_reciprocal->mat[i])) {
        goto escape;   /* duplicate -- skip it */
      }
    }
    unique_rot[num_rot] = i;
    num_rot++;
  escape:
    ;
  }

  if ((rot_return = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]);
    }
  }

  free(unique_rot);
  unique_rot = NULL;
  mat_free_MatINT(rot_reciprocal);
  rot_reciprocal = NULL;

  return rot_return;
}

/* Return NULL if failed */
/* Keeps only the rotations that map the q-point set onto itself          */
/* (mod reciprocal lattice vectors, within symprec). */
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
                                                 const double symprec,
                                                 const size_t num_q,
                                                 SPGCONST double qpoints[][3])
{
  int i, j, k, l, is_all_ok, num_rot;
  int *ir_rot;
  double q_rot[3], diff[3];
  MatINT * rot_reciprocal_q;

  ir_rot = NULL;
  rot_reciprocal_q = NULL;
  is_all_ok = 0;
  num_rot = 0;

  if ((ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size)) == NULL) {
    warning_print("spglib: Memory of ir_rot could not be allocated.");
    return NULL;
  }

  for (i = 0; i < rot_reciprocal->size; i++) {
    ir_rot[i] = -1;
  }
  for (i = 0; i < rot_reciprocal->size; i++) {
    for (j = 0; j < num_q; j++) {
      is_all_ok = 0;
      mat_multiply_matrix_vector_id3(q_rot,
                                     rot_reciprocal->mat[i],
                                     qpoints[j]);

      for (k = 0; k < num_q; k++) {
        for (l = 0; l < 3; l++) {
          diff[l] = q_rot[l] - qpoints[k][l];
          diff[l] -= mat_Nint(diff[l]);   /* wrap to nearest lattice image */
        }

        if (mat_Dabs(diff[0]) < symprec &&
            mat_Dabs(diff[1]) < symprec &&
            mat_Dabs(diff[2]) < symprec) {
          is_all_ok = 1;
          break;
        }
      }

      if (! is_all_ok) {
        break;
      }
    }

    if (is_all_ok) {
      ir_rot[num_rot] = i;
      num_rot++;
    }
  }

  if ((rot_reciprocal_q = mat_alloc_MatINT(num_rot)) != NULL) {
    for (i = 0; i < num_rot; i++) {
      mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
                         rot_reciprocal->mat[ir_rot[i]]);
    }
  }

  free(ir_rot);
  ir_rot = NULL;

  return rot_reciprocal_q;
}

/* Dispatch: the fast path requires the mesh/shift to be compatible with  */
/* the rotations; otherwise a coordinate-scaled (distortion) variant runs. */
static size_t get_dense_ir_reciprocal_mesh(int grid_address[][3],
                                           size_t ir_mapping_table[],
                                           const int mesh[3],
                                           const int is_shift[3],
                                           const MatINT *rot_reciprocal)
{
  if (check_mesh_symmetry(mesh, is_shift, rot_reciprocal)) {
    return get_dense_ir_reciprocal_mesh_normal(grid_address,
                                               ir_mapping_table,
                                               mesh,
                                               is_shift,
                                               rot_reciprocal);
  } else {
    return get_dense_ir_reciprocal_mesh_distortion(grid_address,
                                                   ir_mapping_table,
                                                   mesh,
                                                   is_shift,
                                                   rot_reciprocal);
  }
}

static size_t get_dense_ir_reciprocal_mesh_normal(int grid_address[][3],
                                                  size_t ir_mapping_table[],
                                                  const int mesh[3],
                                                  const int is_shift[3],
                                                  const MatINT *rot_reciprocal)
{
  /* In the following loop, mesh is doubled. */
  /* Even and odd mesh numbers correspond to */
  /* is_shift[i] are 0 or 1, respectively. */
  /* is_shift = [0,0,0] gives Gamma center mesh. */
  /* grid: reducible grid points */
  /* ir_mapping_table: the mapping from each point to ir-point. */
  size_t i, grid_point_rot;
  int j;
  int address_double[3], address_double_rot[3];

  kgd_get_all_grid_addresses(grid_address, mesh);

#pragma omp parallel for private(j, grid_point_rot, address_double, address_double_rot)
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    kgd_get_grid_address_double_mesh(address_double,
                                     grid_address[i],
                                     mesh,
                                     is_shift);
    ir_mapping_table[i] = i;
    for (j = 0; j < rot_reciprocal->size; j++) {
      mat_multiply_matrix_vector_i3(address_double_rot,
                                    rot_reciprocal->mat[j],
                                    address_double);
      grid_point_rot =
        kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh);
      if (grid_point_rot < ir_mapping_table[i]) {
#ifdef _OPENMP
        /* NOTE(review): the OpenMP branch stores the raw rotated index    */
        /* (chains are compressed later in get_dense_num_ir); the serial   */
        /* branch follows the chain immediately and can stop early.        */
        ir_mapping_table[i] = grid_point_rot;
#else
        ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
        break;
#endif
      }
    }
  }

  return get_dense_num_ir(ir_mapping_table, mesh);
}

/* Variant for meshes whose axes the rotations mix despite unequal mesh   */
/* numbers: coordinates are rescaled by `divisor` so a rotated address is */
/* only accepted when it lands exactly on a (correctly shifted) point.    */
static size_t
get_dense_ir_reciprocal_mesh_distortion(int grid_address[][3],
                                        size_t ir_mapping_table[],
                                        const int mesh[3],
                                        const int is_shift[3],
                                        const MatINT *rot_reciprocal)
{
  size_t i, grid_point_rot;
  int j, k, indivisible;
  int address_double[3], address_double_rot[3], divisor[3];

  kgd_get_all_grid_addresses(grid_address, mesh);

  for (j = 0; j < 3; j++) {
    divisor[j] = mesh[(j + 1) % 3] * mesh[(j + 2) % 3];
  }

#pragma omp parallel for private(j, k, grid_point_rot, address_double, address_double_rot)
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    kgd_get_grid_address_double_mesh(address_double,
                                     grid_address[i],
                                     mesh,
                                     is_shift);
    for (j = 0; j < 3; j++) {
      address_double[j] *= divisor[j];
    }
    ir_mapping_table[i] = i;
    for (j = 0; j < rot_reciprocal->size; j++) {
      mat_multiply_matrix_vector_i3(address_double_rot,
                                    rot_reciprocal->mat[j],
                                    address_double);
      for (k = 0; k < 3; k++) {
        indivisible = address_double_rot[k] % divisor[k];
        if (indivisible) {break;}
        address_double_rot[k] /= divisor[k];
        if ((address_double_rot[k] % 2 != 0 && is_shift[k] == 0) ||
            (address_double_rot[k] % 2 == 0 && is_shift[k] == 1)) {
          /* Image falls off the shifted grid -> reject this rotation. */
          indivisible = 1;
          break;
        }
      }
      if (indivisible) {continue;}
      grid_point_rot =
        kgd_get_dense_grid_point_double_mesh(address_double_rot, mesh);
      if (grid_point_rot < ir_mapping_table[i]) {
#ifdef _OPENMP
        ir_mapping_table[i] = grid_point_rot;
#else
        ir_mapping_table[i] = ir_mapping_table[grid_point_rot];
        break;
#endif
      }
    }
  }

  return get_dense_num_ir(ir_mapping_table, mesh);
}

/* Counts points that map to themselves (the irreducible points).  Under  */
/* OpenMP it also compresses one level of mapping chains left by the      */
/* parallel loops above. */
static size_t get_dense_num_ir(size_t ir_mapping_table[], const int mesh[3])
{
  size_t i, num_ir;

  num_ir = 0;

#pragma omp parallel for reduction(+:num_ir)
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    if (ir_mapping_table[i] == i) {
      num_ir++;
    }
  }

#ifdef _OPENMP
  for (i = 0; i < mesh[0] * mesh[1] * (size_t)(mesh[2]); i++) {
    ir_mapping_table[i] = ir_mapping_table[ir_mapping_table[i]];
  }
#endif

  return num_ir;
}

/* For every grid point, finds the periodic image(s) closest to Gamma in  */
/* Cartesian metric; ties within tolerance become extra "boundary" points */
/* appended after the first total_num_gp entries.  Returns the total      */
/* number of BZ grid points (interior + boundary copies). */
static size_t relocate_dense_BZ_grid_address(int bz_grid_address[][3],
                                             size_t bz_map[],
                                             SPGCONST int grid_address[][3],
                                             const int mesh[3],
                                             SPGCONST double rec_lattice[3][3],
                                             const int is_shift[3])
{
  double tolerance, min_distance;
  double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE];
  int bzmesh[3], bz_address_double[3];
  size_t i, boundary_num_gp, total_num_gp, bzgp, gp, num_bzmesh;
  int j, k, min_index;

  tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh);
  for (j = 0; j < 3; j++) {
    bzmesh[j] = mesh[j] * 2;
  }

  num_bzmesh = bzmesh[0] * bzmesh[1] * (size_t)(bzmesh[2]);
  for (i = 0; i < num_bzmesh; i++) {
    bz_map[i] = num_bzmesh;   /* sentinel: "no grid point here" */
  }

  boundary_num_gp = 0;
  total_num_gp = mesh[0] * mesh[1] * (size_t)(mesh[2]);

  /* Multithreading doesn't work for this loop since gp calculated */
  /* with boundary_num_gp is unstable to store bz_grid_address. */
  for (i = 0; i < total_num_gp; i++) {
    for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
      for (k = 0; k < 3; k++) {
        q_vector[k] =
          ((grid_address[i][k] + bz_search_space[j][k] * mesh[k]) * 2 +
           is_shift[k]) / ((double)mesh[k]) / 2;
      }
      mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector);
      distance[j] = mat_norm_squared_d3(q_vector);
    }
    min_distance = distance[0];
    min_index = 0;
    for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
      if (distance[j] < min_distance) {
        min_distance = distance[j];
        min_index = j;
      }
    }

    for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
      if (distance[j] < min_distance + tolerance) {
        if (j == min_index) {
          gp = i;   /* the canonical copy keeps its original index */
        } else {
          gp = boundary_num_gp + total_num_gp;   /* extra boundary copy */
        }

        for (k = 0; k < 3; k++) {
          bz_grid_address[gp][k] =
            grid_address[i][k] + bz_search_space[j][k] * mesh[k];
          bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k];
        }
        bzgp = kgd_get_dense_grid_point_double_mesh(bz_address_double, bzmesh);
        bz_map[bzgp] = gp;
        if (j != min_index) {
          boundary_num_gp++;
        }
      }
    }
  }

  return boundary_num_gp + total_num_gp;
}

/* Tolerance = 1% of the largest squared reciprocal step along any axis.  */
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
                                             const int mesh[3])
{
  int i, j;
  double tolerance;
  double length[3];

  for (i = 0; i < 3; i++) {
    length[i] = 0;
    for (j = 0; j < 3; j++) {
      length[i] += rec_lattice[j][i] * rec_lattice[j][i];
    }
    length[i] /= mesh[i] * mesh[i];
  }
  tolerance = length[0];
  for (i = 1; i < 3; i++) {
    if (tolerance < length[i]) {
      tolerance = length[i];
    }
  }
  tolerance *= 0.01;

  return tolerance;
}

/* Returns nonzero when every axis-exchanging rotation present in the     */
/* group is matched by equal mesh numbers and equal shifts on those axes. */
static int check_mesh_symmetry(const int mesh[3],
                               const int is_shift[3],
                               const MatINT *rot_reciprocal)
{
  int i;
  int eq[3];

  eq[0] = 0; /* a=b */
  eq[1] = 0; /* b=c */
  eq[2] = 0; /* c=a */

  for (i = 0; i < rot_reciprocal->size; i++) {
    if (rot_reciprocal->mat[i][0][0] == 0 &&
        rot_reciprocal->mat[i][1][0] == 1 &&
        rot_reciprocal->mat[i][2][0] == 0) {eq[0] = 1;}
    if (rot_reciprocal->mat[i][0][0] == 0 &&
        rot_reciprocal->mat[i][1][0] == 0 &&
        rot_reciprocal->mat[i][2][0] == 1) {eq[2] = 1;}
    if (rot_reciprocal->mat[i][0][1] == 0 &&
        rot_reciprocal->mat[i][1][1] == 0 &&
        rot_reciprocal->mat[i][2][1] == 1) {eq[1] = 1;}
  }

  return (((eq[0] && mesh[0] == mesh[1] && is_shift[0] == is_shift[1]) || (!eq[0])) &&
          ((eq[1] && mesh[1] == mesh[2] && is_shift[1] == is_shift[2]) || (!eq[1])) &&
          ((eq[2] && mesh[2] == mesh[0] && is_shift[2] == is_shift[0]) || (!eq[2])));
}
GB_binop__minus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__minus_fp32 // A.*B function (eWiseMult): GB_AemultB__minus_fp32 // A*D function (colscale): GB_AxD__minus_fp32 // D*A function (rowscale): GB_DxB__minus_fp32 // C+=B function (dense accum): GB_Cdense_accumB__minus_fp32 // C+=b function (dense accum): GB_Cdense_accumb__minus_fp32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_fp32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_fp32 // C=scalar+B GB_bind1st__minus_fp32 // C=scalar+B' GB_bind1st_tran__minus_fp32 // C=A+scalar GB_bind2nd__minus_fp32 // C=A'+scalar GB_bind2nd_tran__minus_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax 
[pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x - y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 1 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ GB_cblas_saxpy // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_FP32 || GxB_NO_MINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): this file is auto-generated (see header: "do not edit");
// only comments were added below.  Each kernel's loop body comes from the
// #included template, specialized by the GB_* macros defined above.

// C += A+B, all three matrices dense; no error paths, hence void return.
void GB_Cdense_ewise3_accum__minus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when this operator/type kernel is compiled out
// (GB_DISABLE), telling the caller to fall back to the generic kernel.
GrB_Info GB_Cdense_ewise3_noaccum__minus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// The *_slice arrays and ntasks describe the parallel partition of B
// (one contiguous range of entries per task).
GrB_Info GB_Cdense_accumB__minus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__minus_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as a
    // generator artifact -- do not hand-edit auto-generated kernels.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__minus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes C's values through Cx
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__minus_fp32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the per-matrix slice workspaces declared in the kernels below;
// invoked by the kernels (and their templates) on all exit paths.
#undef GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__minus_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces, allocated by the template, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__minus_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = x - Bx [p] for every entry present in the bitmap Bb
// (Bb == NULL means all entries present).
GrB_Info GB_bind1st__minus_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__minus_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB_bind1st_tran__minus_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB_bind2nd_tran__minus_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__minus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_01__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_03__minus_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint8) // A*D function (colscale): GB (_AxD__minus_uint8) // D*A function (rowscale): GB (_DxB__minus_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint8) // C=scalar+B GB (_bind1st__minus_uint8) // C=scalar+B' GB (_bind1st_tran__minus_uint8) // C=A+scalar GB (_bind2nd__minus_uint8) // C=A'+scalar GB (_bind2nd_tran__minus_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // 
true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT8 || GxB_NO_MINUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__minus_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__minus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ordered-3.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only DejaGnu test: duplicate `threads'/`simd' clauses on the
   OpenMP `ordered' construct must be diagnosed, `ordered' with a
   parameter is rejected on simd loop constructs, and doacross
   depend(sink)/depend(source) requires an enclosing loop whose
   `ordered' clause has a parameter.  The dg-error comments are the
   expected diagnostics -- each must stay on the line of the construct
   it refers to (`.-1' refers to the preceding line).  */

void
foo (void)
{
  int i;

  /* No clause, or a single `threads' clause: OK.  */
  #pragma omp for ordered
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered
	;
    }
  #pragma omp for ordered
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered threads
	;
    }
  /* Repeating `threads' is an error.  */
  #pragma omp for ordered
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered threads threads /* { dg-error "too many .threads. clauses" } */
	;
    }
  /* A single `simd' clause inside a simd region: OK.  */
  #pragma omp simd
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered simd
	;
    }
  /* Repeating `simd' is an error.  */
  #pragma omp simd
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered simd simd /* { dg-error "too many .simd. clauses" } */
	;
    }
  /* `threads' and `simd' may be combined once each...  */
  #pragma omp for simd ordered
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered threads, simd
	;
    }
  /* ...but not repeated; the second diagnostic is matched on the
     pragma line via the `.-1' reference below.  */
  #pragma omp for simd ordered
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered threads, simd, threads, simd /* { dg-error "too many .threads. clauses" } */
	;	/* { dg-error "too many .simd. clauses" "" { target *-*-* } .-1 } */
    }
  /* `ordered(n)' is invalid on simd constructs, and the doacross
     pragmas then lack a valid enclosing doacross loop.  */
  #pragma omp for simd ordered(1) /* { dg-error ".ordered. clause with parameter may not be specified on .#pragma omp for simd. construct" } */
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(sink: i - 1) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
      #pragma omp ordered depend(source) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
    }
  #pragma omp parallel for simd ordered(1) /* { dg-error ".ordered. clause with parameter may not be specified on .#pragma omp parallel for simd. construct" } */
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(sink: i - 1) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
      #pragma omp ordered depend(source) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
    }
  /* `ordered' without a parameter does not enable doacross either.  */
  #pragma omp parallel for ordered
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(sink: i - 1) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
      #pragma omp ordered depend(source) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
    }
  /* Nor does a plain worksharing loop.  */
  #pragma omp parallel for
  for (i = 0; i < 64; i++)
    {
      #pragma omp ordered depend(sink: i - 1) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
      #pragma omp ordered depend(source) /* { dg-error "clause must be closely nested inside a loop with .ordered. clause with a parameter" } */
    }
}

/* Same duplicate-clause checks with the pragmas as switch-case bodies.  */
void
bar (int x)
{
  switch (x)
    {
    case 0:
      #pragma omp ordered
	;
      break;
    case 1:
      #pragma omp ordered threads
	;
      break;
    case 2:
      #pragma omp ordered threads, threads /* { dg-error "too many .threads. clauses" } */
	;
      break;
    }
}

/* Orphaned `ordered simd' (outside any loop construct) plus the
   duplicate-`simd' diagnostic.  */
void
baz (void)
{
  #pragma omp ordered simd
    ;
  #pragma omp ordered simd, simd /* { dg-error "too many .simd. clauses" } */
    ;
}
decl2.c
/* Process declarations and variables for C++ compiler. Copyright (C) 1988-2020 Free Software Foundation, Inc. Hacked by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C++ front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "memmodel.h" #include "target.h" #include "cp-tree.h" #include "c-family/c-common.h" #include "timevar.h" #include "stringpool.h" #include "cgraph.h" #include "varasm.h" #include "attribs.h" #include "stor-layout.h" #include "calls.h" #include "decl.h" #include "toplev.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "dumpfile.h" #include "intl.h" #include "c-family/c-ada-spec.h" #include "asan.h" /* Id for dumping the raw trees. */ int raw_dump_id; extern cpp_reader *parse_in; /* This structure contains information about the initializations and/or destructions required for a particular priority level. */ typedef struct priority_info_s { /* Nonzero if there have been any initializations at this priority throughout the translation unit. 
*/ int initializations_p; /* Nonzero if there have been any destructions at this priority throughout the translation unit. */ int destructions_p; } *priority_info; static void mark_vtable_entries (tree); static bool maybe_emit_vtables (tree); static tree start_objects (int, int); static void finish_objects (int, int, tree); static tree start_static_storage_duration_function (unsigned); static void finish_static_storage_duration_function (tree); static priority_info get_priority_info (int); static void do_static_initialization_or_destruction (tree, bool); static void one_static_initialization_or_destruction (tree, tree, bool); static void generate_ctor_or_dtor_function (bool, int, location_t *); static int generate_ctor_and_dtor_functions_for_priority (splay_tree_node, void *); static tree prune_vars_needing_no_initialization (tree *); static void write_out_vars (tree); static void import_export_class (tree); static tree get_guard_bits (tree); static void determine_visibility_from_class (tree, tree); static bool determine_hidden_inline (tree); static void maybe_instantiate_decl (tree); /* A list of static class variables. This is needed, because a static class variable can be declared inside the class without an initializer, and then initialized, statically, outside the class. */ static GTY(()) vec<tree, va_gc> *pending_statics; /* A list of functions which were declared inline, but which we may need to emit outline anyway. */ static GTY(()) vec<tree, va_gc> *deferred_fns; /* A list of decls that use types with no linkage, which we need to make sure are defined. */ static GTY(()) vec<tree, va_gc> *no_linkage_decls; /* A vector of alternating decls and identifiers, where the latter is to be an alias for the former if the former is defined. */ static GTY(()) vec<tree, va_gc> *mangling_aliases; /* hash traits for declarations. Hashes single decls via DECL_ASSEMBLER_NAME_RAW. */ struct mangled_decl_hash : ggc_remove <tree> { typedef tree value_type; /* A DECL. 
*/ typedef tree compare_type; /* An identifier. */ static hashval_t hash (const value_type decl) { return IDENTIFIER_HASH_VALUE (DECL_ASSEMBLER_NAME_RAW (decl)); } static bool equal (const value_type existing, compare_type candidate) { tree name = DECL_ASSEMBLER_NAME_RAW (existing); return candidate == name; } static const bool empty_zero_p = true; static inline void mark_empty (value_type &p) {p = NULL_TREE;} static inline bool is_empty (value_type p) {return !p;} static bool is_deleted (value_type e) { return e == reinterpret_cast <value_type> (1); } static void mark_deleted (value_type &e) { e = reinterpret_cast <value_type> (1); } }; /* A hash table of decls keyed by mangled name. Used to figure out if we need compatibility aliases. */ static GTY(()) hash_table<mangled_decl_hash> *mangled_decls; /* Nonzero if we're done parsing and into end-of-file activities. */ int at_eof; /* True if note_mangling_alias should enqueue mangling aliases for later generation, rather than emitting them right away. */ bool defer_mangling_aliases = true; /* Return a member function type (a METHOD_TYPE), given FNTYPE (a FUNCTION_TYPE), CTYPE (class type), and QUALS (the cv-qualifiers that apply to the function). */ tree build_memfn_type (tree fntype, tree ctype, cp_cv_quals quals, cp_ref_qualifier rqual) { if (fntype == error_mark_node || ctype == error_mark_node) return error_mark_node; gcc_assert (FUNC_OR_METHOD_TYPE_P (fntype)); cp_cv_quals type_quals = quals & ~TYPE_QUAL_RESTRICT; ctype = cp_build_qualified_type (ctype, type_quals); tree newtype = build_method_type_directly (ctype, TREE_TYPE (fntype), (TREE_CODE (fntype) == METHOD_TYPE ? 
TREE_CHAIN (TYPE_ARG_TYPES (fntype)) : TYPE_ARG_TYPES (fntype))); if (tree attrs = TYPE_ATTRIBUTES (fntype)) newtype = cp_build_type_attribute_variant (newtype, attrs); newtype = build_cp_fntype_variant (newtype, rqual, TYPE_RAISES_EXCEPTIONS (fntype), TYPE_HAS_LATE_RETURN_TYPE (fntype)); return newtype; } /* Return a variant of FNTYPE, a FUNCTION_TYPE or METHOD_TYPE, with its return type changed to NEW_RET. */ tree change_return_type (tree new_ret, tree fntype) { if (new_ret == error_mark_node) return fntype; if (same_type_p (new_ret, TREE_TYPE (fntype))) return fntype; tree newtype; tree args = TYPE_ARG_TYPES (fntype); if (TREE_CODE (fntype) == FUNCTION_TYPE) { newtype = build_function_type (new_ret, args); newtype = apply_memfn_quals (newtype, type_memfn_quals (fntype)); } else newtype = build_method_type_directly (class_of_this_parm (fntype), new_ret, TREE_CHAIN (args)); if (tree attrs = TYPE_ATTRIBUTES (fntype)) newtype = cp_build_type_attribute_variant (newtype, attrs); newtype = cxx_copy_lang_qualifiers (newtype, fntype); return newtype; } /* Build a PARM_DECL of FN with NAME and TYPE, and set DECL_ARG_TYPE appropriately. */ tree cp_build_parm_decl (tree fn, tree name, tree type) { tree parm = build_decl (input_location, PARM_DECL, name, type); DECL_CONTEXT (parm) = fn; /* DECL_ARG_TYPE is only used by the back end and the back end never sees templates. */ if (!processing_template_decl) DECL_ARG_TYPE (parm) = type_passed_as (type); return parm; } /* Returns a PARM_DECL of FN for a parameter of the indicated TYPE, with the indicated NAME. */ tree build_artificial_parm (tree fn, tree name, tree type) { tree parm = cp_build_parm_decl (fn, name, type); DECL_ARTIFICIAL (parm) = 1; /* All our artificial parms are implicitly `const'; they cannot be assigned to. 
*/
  TREE_READONLY (parm) = 1;
  return parm;
}

/* Constructors for types with virtual baseclasses need an "in-charge" flag
   saying whether this constructor is responsible for initialization of
   virtual baseclasses or not.  All destructors also need this "in-charge"
   flag, which additionally determines whether or not the destructor should
   free the memory for the object.

   This function adds the "in-charge" flag to member function FN if
   appropriate.  It is called from grokclassfn and tsubst.
   FN must be either a constructor or destructor.

   The in-charge flag follows the 'this' parameter, and is followed by the
   VTT parm (if any), then the user-written parms.  */

void
maybe_retrofit_in_chrg (tree fn)
{
  tree basetype, arg_types, parms, parm, fntype;

  /* If we've already added the in-charge parameter, don't do it again.  */
  if (DECL_HAS_IN_CHARGE_PARM_P (fn))
    return;

  /* When processing templates we can't know, in general, whether or not
     we're going to have virtual baseclasses.  */
  if (processing_template_decl)
    return;

  /* We don't need an in-charge parameter for constructors that don't
     have virtual bases.  */
  if (DECL_CONSTRUCTOR_P (fn)
      && !CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
    return;

  /* The first argument type belongs to the 'this' parameter; remember the
     class it points at and step past it so ARG_TYPES starts at the parms
     that follow 'this'.  */
  arg_types = TYPE_ARG_TYPES (TREE_TYPE (fn));
  basetype = TREE_TYPE (TREE_VALUE (arg_types));
  arg_types = TREE_CHAIN (arg_types);

  parms = DECL_CHAIN (DECL_ARGUMENTS (fn));

  /* If this is a subobject constructor or destructor, our
     caller will pass us a pointer to our VTT.  */
  if (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (fn)))
    {
      parm = build_artificial_parm (fn, vtt_parm_identifier, vtt_parm_type);

      /* First add it to DECL_ARGUMENTS between 'this' and the real args...  */
      DECL_CHAIN (parm) = parms;
      parms = parm;

      /* ...and then to TYPE_ARG_TYPES.  */
      arg_types = hash_tree_chain (vtt_parm_type, arg_types);

      DECL_HAS_VTT_PARM_P (fn) = 1;
    }

  /* Then add the in-charge parm (before the VTT parm.
*/
  parm = build_artificial_parm (fn, in_charge_identifier, integer_type_node);
  DECL_CHAIN (parm) = parms;
  parms = parm;
  arg_types = hash_tree_chain (integer_type_node, arg_types);

  /* Insert our new parameter(s) into the list.  */
  DECL_CHAIN (DECL_ARGUMENTS (fn)) = parms;

  /* And rebuild the function type.  */
  fntype = build_method_type_directly (basetype, TREE_TYPE (TREE_TYPE (fn)),
				       arg_types);
  /* Preserve any attributes and lang-specific qualifiers the old
     function type carried.  */
  if (TYPE_ATTRIBUTES (TREE_TYPE (fn)))
    fntype = (cp_build_type_attribute_variant
	      (fntype, TYPE_ATTRIBUTES (TREE_TYPE (fn))));
  fntype = cxx_copy_lang_qualifiers (fntype, TREE_TYPE (fn));
  TREE_TYPE (fn) = fntype;

  /* Now we've got the in-charge parameter.  */
  DECL_HAS_IN_CHARGE_PARM_P (fn) = 1;
}

/* Classes overload their constituent function names automatically.
   When a function name is declared in a record structure,
   its name is changed to its overloaded name.  Since names for
   constructors and destructors can conflict, we place a leading
   '$' for destructors.

   CNAME is the name of the class we are grokking for.

   FUNCTION is a FUNCTION_DECL.  It was created by `grokdeclarator'.

   FLAGS contains bits saying what's special about today's
   arguments.  DTOR_FLAG == DESTRUCTOR.

   If FUNCTION is a destructor, then we must add the `auto-delete' field
   as a second parameter.  There is some hair associated with the fact
   that we must "declare" this variable in the manner consistent with the
   way the rest of the arguments were declared.

   QUALS are the qualifiers for the this pointer.  */

void
grokclassfn (tree ctype, tree function, enum overload_flags flags)
{
  tree fn_name = DECL_NAME (function);

  /* Even within an `extern "C"' block, members get C++ linkage.  See
     [dcl.link] for details.
*/ SET_DECL_LANGUAGE (function, lang_cplusplus); if (fn_name == NULL_TREE) { error ("name missing for member function"); fn_name = get_identifier ("<anonymous>"); DECL_NAME (function) = fn_name; } DECL_CONTEXT (function) = ctype; if (flags == DTOR_FLAG) DECL_CXX_DESTRUCTOR_P (function) = 1; if (flags == DTOR_FLAG || DECL_CONSTRUCTOR_P (function)) maybe_retrofit_in_chrg (function); } /* Create an ARRAY_REF, checking for the user doing things backwards along the way. DECLTYPE_P is for N3276, as in the parser. */ tree grok_array_decl (location_t loc, tree array_expr, tree index_exp, bool decltype_p) { tree type; tree expr; tree orig_array_expr = array_expr; tree orig_index_exp = index_exp; tree overload = NULL_TREE; if (error_operand_p (array_expr) || error_operand_p (index_exp)) return error_mark_node; if (processing_template_decl) { if (type_dependent_expression_p (array_expr) || type_dependent_expression_p (index_exp)) return build_min_nt_loc (loc, ARRAY_REF, array_expr, index_exp, NULL_TREE, NULL_TREE); array_expr = build_non_dependent_expr (array_expr); index_exp = build_non_dependent_expr (index_exp); } type = TREE_TYPE (array_expr); gcc_assert (type); type = non_reference (type); /* If they have an `operator[]', use that. */ if (MAYBE_CLASS_TYPE_P (type) || MAYBE_CLASS_TYPE_P (TREE_TYPE (index_exp))) { tsubst_flags_t complain = tf_warning_or_error; if (decltype_p) complain |= tf_decltype; expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL, array_expr, index_exp, NULL_TREE, &overload, complain); } else { tree p1, p2, i1, i2; bool swapped = false; /* Otherwise, create an ARRAY_REF for a pointer or array type. It is a little-known fact that, if `a' is an array and `i' is an int, you can write `i[a]', which means the same thing as `a[i]'. 
*/ if (TREE_CODE (type) == ARRAY_TYPE || VECTOR_TYPE_P (type)) p1 = array_expr; else p1 = build_expr_type_conversion (WANT_POINTER, array_expr, false); if (TREE_CODE (TREE_TYPE (index_exp)) == ARRAY_TYPE) p2 = index_exp; else p2 = build_expr_type_conversion (WANT_POINTER, index_exp, false); i1 = build_expr_type_conversion (WANT_INT | WANT_ENUM, array_expr, false); i2 = build_expr_type_conversion (WANT_INT | WANT_ENUM, index_exp, false); if ((p1 && i2) && (i1 && p2)) error ("ambiguous conversion for array subscript"); if (p1 && i2) array_expr = p1, index_exp = i2; else if (i1 && p2) swapped = true, array_expr = p2, index_exp = i1; else { error_at (loc, "invalid types %<%T[%T]%> for array subscript", type, TREE_TYPE (index_exp)); return error_mark_node; } if (array_expr == error_mark_node || index_exp == error_mark_node) error ("ambiguous conversion for array subscript"); if (TYPE_PTR_P (TREE_TYPE (array_expr))) array_expr = mark_rvalue_use (array_expr); else array_expr = mark_lvalue_use_nonread (array_expr); index_exp = mark_rvalue_use (index_exp); if (swapped && flag_strong_eval_order == 2 && (TREE_SIDE_EFFECTS (array_expr) || TREE_SIDE_EFFECTS (index_exp))) expr = build_array_ref (input_location, index_exp, array_expr); else expr = build_array_ref (input_location, array_expr, index_exp); } if (processing_template_decl && expr != error_mark_node) { if (overload != NULL_TREE) return (build_min_non_dep_op_overload (ARRAY_REF, expr, overload, orig_array_expr, orig_index_exp)); return build_min_non_dep (ARRAY_REF, expr, orig_array_expr, orig_index_exp, NULL_TREE, NULL_TREE); } return expr; } /* Given the cast expression EXP, checking out its validity. Either return an error_mark_node if there was an unavoidable error, return a cast to void for trying to delete a pointer w/ the value 0, or return the call to delete. If DOING_VEC is true, we handle things differently for doing an array delete. Implements ARM $5.3.4. This is called from the parser. 
*/
tree
delete_sanity (location_t loc, tree exp, tree size, bool doing_vec,
	       int use_global_delete, tsubst_flags_t complain)
{
  tree t, type;

  if (exp == error_mark_node)
    return exp;

  /* In a template, build a DELETE_EXPR to be tsubsted later; we can't
     analyze the operand yet.  */
  if (processing_template_decl)
    {
      t = build_min (DELETE_EXPR, void_type_node, exp, size);
      DELETE_EXPR_USE_GLOBAL (t) = use_global_delete;
      DELETE_EXPR_USE_VEC (t) = doing_vec;
      TREE_SIDE_EFFECTS (t) = 1;
      SET_EXPR_LOCATION (t, loc);
      return t;
    }

  /* Point diagnostics at the operand expression itself when it carries a
     location, falling back to LOC.  */
  location_t exp_loc = cp_expr_loc_or_loc (exp, loc);

  /* An array can't have been allocated by new, so complain.  */
  if (TREE_CODE (TREE_TYPE (exp)) == ARRAY_TYPE
      && (complain & tf_warning))
    warning_at (exp_loc, 0, "deleting array %q#E", exp);

  t = build_expr_type_conversion (WANT_POINTER, exp, true);

  if (t == NULL_TREE || t == error_mark_node)
    {
      if (complain & tf_error)
	error_at (exp_loc,
		  "type %q#T argument given to %<delete%>, expected pointer",
		  TREE_TYPE (exp));
      return error_mark_node;
    }

  type = TREE_TYPE (t);

  /* As of Valley Forge, you can delete a pointer to const.  */

  /* You can't delete functions.  */
  if (TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE)
    {
      if (complain & tf_error)
	error_at (exp_loc,
		  "cannot delete a function. Only pointer-to-objects are "
		  "valid arguments to %<delete%>");
      return error_mark_node;
    }

  /* Deleting ptr to void is undefined behavior [expr.delete/3].  */
  if (VOID_TYPE_P (TREE_TYPE (type)))
    {
      if (complain & tf_warning)
	warning_at (exp_loc, OPT_Wdelete_incomplete,
		    "deleting %qT is undefined", type);
      doing_vec = 0;
    }

  /* Deleting a pointer with the value zero is valid and has no effect.
     Emit a cast to void so the operand is still evaluated.  */
  if (integer_zerop (t))
    return build1_loc (loc, NOP_EXPR, void_type_node, t);

  if (doing_vec)
    return build_vec_delete (loc, t, /*maxindex=*/NULL_TREE,
			     sfk_deleting_destructor,
			     use_global_delete, complain);
  else
    return build_delete (loc, type, t, sfk_deleting_destructor,
			 LOOKUP_NORMAL, use_global_delete,
			 complain);
}

/* Report an error if the indicated template declaration is not the
   sort of thing that should be a member template.
*/
void
check_member_template (tree tmpl)
{
  tree decl;

  gcc_assert (TREE_CODE (tmpl) == TEMPLATE_DECL);
  decl = DECL_TEMPLATE_RESULT (tmpl);

  /* Acceptable member templates: function templates, alias templates,
     and class/class-like type templates.  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      || DECL_ALIAS_TEMPLATE_P (tmpl)
      || (TREE_CODE (decl) == TYPE_DECL
	  && MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))))
    {
      /* The parser rejects template declarations in local classes
	 (with the exception of generic lambdas).  */
      gcc_assert (!current_function_decl || LAMBDA_FUNCTION_P (decl));
      /* The parser rejects any use of virtual in a function template.  */
      gcc_assert (!(TREE_CODE (decl) == FUNCTION_DECL
		    && DECL_VIRTUAL_P (decl)));

      /* The debug-information generating code doesn't know what to do
	 with member templates.  */
      DECL_IGNORED_P (tmpl) = 1;
    }
  else if (variable_template_p (tmpl))
    /* OK */;
  else
    error ("template declaration of %q#D", decl);
}

/* Sanity check: report error if this function FUNCTION is not really a
   member of the class (CTYPE) it is supposed to belong to.
   TEMPLATE_PARMS is used to specify the template parameters of a member
   template passed as FUNCTION_DECL.  If the member template is passed as
   a TEMPLATE_DECL, it can be NULL since the parameters can be extracted
   from the declaration.  If the function is not a function template, it
   must be NULL.
   It returns the original declaration for the function, NULL_TREE if
   no declaration was found, error_mark_node if an error was emitted.  */

tree
check_classfn (tree ctype, tree function, tree template_parms)
{
  if (DECL_USE_TEMPLATE (function)
      && !(TREE_CODE (function) == TEMPLATE_DECL
	   && DECL_TEMPLATE_SPECIALIZATION (function))
      && DECL_MEMBER_TEMPLATE_P (DECL_TI_TEMPLATE (function)))
    /* Since this is a specialization of a member template, we're not
       going to find the declaration in the class.  For example, in:

	 struct S { template <typename T> void f(T); };
	 template <> void S::f(int);

       we're not going to find `S::f(int)', but there's no reason we
       should, either.  We let our callers know we didn't find the
       method, but we don't complain.
*/ return NULL_TREE; /* Basic sanity check: for a template function, the template parameters either were not passed, or they are the same of DECL_TEMPLATE_PARMS. */ if (TREE_CODE (function) == TEMPLATE_DECL) { if (template_parms && !comp_template_parms (template_parms, DECL_TEMPLATE_PARMS (function))) { error ("template parameter lists provided don%'t match the " "template parameters of %qD", function); return error_mark_node; } template_parms = DECL_TEMPLATE_PARMS (function); } /* OK, is this a definition of a member template? */ bool is_template = (template_parms != NULL_TREE); /* [temp.mem] A destructor shall not be a member template. */ if (DECL_DESTRUCTOR_P (function) && is_template) { error ("destructor %qD declared as member template", function); return error_mark_node; } /* We must enter the scope here, because conversion operators are named by target type, and type equivalence relies on typenames resolving within the scope of CTYPE. */ tree pushed_scope = push_scope (ctype); tree matched = NULL_TREE; tree fns = get_class_binding (ctype, DECL_NAME (function)); for (ovl_iterator iter (fns); !matched && iter; ++iter) { tree fndecl = *iter; /* A member template definition only matches a member template declaration. */ if (is_template != (TREE_CODE (fndecl) == TEMPLATE_DECL)) continue; if (!DECL_DECLARES_FUNCTION_P (fndecl)) continue; tree p1 = TYPE_ARG_TYPES (TREE_TYPE (function)); tree p2 = TYPE_ARG_TYPES (TREE_TYPE (fndecl)); /* We cannot simply call decls_match because this doesn't work for static member functions that are pretending to be methods, and because the name may have been changed by asm("new_name"). */ /* Get rid of the this parameter on functions that become static. */ if (DECL_STATIC_FUNCTION_P (fndecl) && TREE_CODE (TREE_TYPE (function)) == METHOD_TYPE) p1 = TREE_CHAIN (p1); /* ref-qualifier or absence of same must match. 
*/ if (type_memfn_rqual (TREE_TYPE (function)) != type_memfn_rqual (TREE_TYPE (fndecl))) continue; // Include constraints in the match. tree c1 = get_constraints (function); tree c2 = get_constraints (fndecl); /* While finding a match, same types and params are not enough if the function is versioned. Also check version ("target") attributes. */ if (same_type_p (TREE_TYPE (TREE_TYPE (function)), TREE_TYPE (TREE_TYPE (fndecl))) && compparms (p1, p2) && !targetm.target_option.function_versions (function, fndecl) && (!is_template || comp_template_parms (template_parms, DECL_TEMPLATE_PARMS (fndecl))) && equivalent_constraints (c1, c2) && (DECL_TEMPLATE_SPECIALIZATION (function) == DECL_TEMPLATE_SPECIALIZATION (fndecl)) && (!DECL_TEMPLATE_SPECIALIZATION (function) || (DECL_TI_TEMPLATE (function) == DECL_TI_TEMPLATE (fndecl)))) matched = fndecl; } if (!matched) { if (!COMPLETE_TYPE_P (ctype)) cxx_incomplete_type_error (DECL_SOURCE_LOCATION (function), function, ctype); else { if (DECL_CONV_FN_P (function)) fns = get_class_binding (ctype, conv_op_identifier); error_at (DECL_SOURCE_LOCATION (function), "no declaration matches %q#D", function); if (fns) print_candidates (fns); else if (DECL_CONV_FN_P (function)) inform (DECL_SOURCE_LOCATION (function), "no conversion operators declared"); else inform (DECL_SOURCE_LOCATION (function), "no functions named %qD", function); inform (DECL_SOURCE_LOCATION (TYPE_NAME (ctype)), "%#qT defined here", ctype); } matched = error_mark_node; } if (pushed_scope) pop_scope (pushed_scope); return matched; } /* DECL is a function with vague linkage. Remember it so that at the end of the translation unit we can decide whether or not to emit it. */ void note_vague_linkage_fn (tree decl) { if (processing_template_decl) return; DECL_DEFER_OUTPUT (decl) = 1; vec_safe_push (deferred_fns, decl); } /* As above, but for variable template instantiations. 
*/
void
note_variable_template_instantiation (tree decl)
{
  /* Queue the instantiation on pending_statics alongside the other
     static class members collected there.  */
  vec_safe_push (pending_statics, decl);
}

/* We have just processed the DECL, which is a static data member.
   The other parameters are as for cp_finish_decl.  */

void
finish_static_data_member_decl (tree decl,
				tree init, bool init_const_expr_p,
				tree asmspec_tree,
				int flags)
{
  if (DECL_TEMPLATE_INSTANTIATED (decl))
    /* We already needed to instantiate this, so the processing in this
       function is unnecessary/wrong.  */
    return;

  DECL_CONTEXT (decl) = current_class_type;

  /* We cannot call pushdecl here, because that would fill in the
     TREE_CHAIN of our decl.  Instead, we modify cp_finish_decl to do
     the right thing, namely, to put this decl out straight away.  */

  if (! processing_template_decl)
    vec_safe_push (pending_statics, decl);

  /* Diagnose static data members in contexts that cannot have them:
     local classes, and (pedantically) unnamed classes at any level of
     nesting.  */
  if (LOCAL_CLASS_P (current_class_type)
      /* We already complained about the template definition.  */
      && !DECL_TEMPLATE_INSTANTIATION (decl))
    permerror (DECL_SOURCE_LOCATION (decl),
	       "local class %q#T shall not have static data member %q#D",
	       current_class_type, decl);
  else
    for (tree t = current_class_type; TYPE_P (t); t = CP_TYPE_CONTEXT (t))
      if (TYPE_UNNAMED_P (t))
	{
	  auto_diagnostic_group d;
	  if (permerror (DECL_SOURCE_LOCATION (decl),
			 "static data member %qD in unnamed class", decl))
	    inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)),
		    "unnamed class defined here");
	  break;
	}

  if (DECL_INLINE_VAR_P (decl) && !DECL_TEMPLATE_INSTANTIATION (decl))
    /* An inline variable is immediately defined, so don't set DECL_IN_AGGR_P.
       Except that if decl is a template instantiation, it isn't defined until
       instantiate_decl.  */;
  else
    DECL_IN_AGGR_P (decl) = 1;

  /* An array member whose bound will only be known from its initializer,
     e.g. `static int a[];' initialized outside the class.  */
  if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
      && TYPE_DOMAIN (TREE_TYPE (decl)) == NULL_TREE)
    SET_VAR_HAD_UNKNOWN_BOUND (decl);

  if (init)
    {
      /* Similarly to start_decl_1, we want to complete the type in order
	 to do the right thing in cp_apply_type_quals_to_decl, possibly
	 clear TYPE_QUAL_CONST (c++/65579).
*/ tree type = TREE_TYPE (decl) = complete_type (TREE_TYPE (decl)); cp_apply_type_quals_to_decl (cp_type_quals (type), decl); } cp_finish_decl (decl, init, init_const_expr_p, asmspec_tree, flags); } /* DECLARATOR and DECLSPECS correspond to a class member. The other parameters are as for cp_finish_decl. Return the DECL for the class member declared. */ tree grokfield (const cp_declarator *declarator, cp_decl_specifier_seq *declspecs, tree init, bool init_const_expr_p, tree asmspec_tree, tree attrlist) { tree value; const char *asmspec = 0; int flags; if (init && TREE_CODE (init) == TREE_LIST && TREE_VALUE (init) == error_mark_node && TREE_CHAIN (init) == NULL_TREE) init = NULL_TREE; value = grokdeclarator (declarator, declspecs, FIELD, init != 0, &attrlist); if (! value || value == error_mark_node) /* friend or constructor went bad. */ return error_mark_node; if (TREE_TYPE (value) == error_mark_node) return value; if (TREE_CODE (value) == TYPE_DECL && init) { error_at (cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (value)), "typedef %qD is initialized (use %qs instead)", value, "decltype"); init = NULL_TREE; } /* Pass friendly classes back. */ if (value == void_type_node) return value; if (DECL_NAME (value) && TREE_CODE (DECL_NAME (value)) == TEMPLATE_ID_EXPR) { error_at (declarator->id_loc, "explicit template argument list not allowed"); return error_mark_node; } /* Stash away type declarations. */ if (TREE_CODE (value) == TYPE_DECL) { DECL_NONLOCAL (value) = 1; DECL_CONTEXT (value) = current_class_type; if (attrlist) { int attrflags = 0; /* If this is a typedef that names the class for linkage purposes (7.1.3p8), apply any attributes directly to the type. 
*/ if (OVERLOAD_TYPE_P (TREE_TYPE (value)) && value == TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value)))) attrflags = ATTR_FLAG_TYPE_IN_PLACE; cplus_decl_attributes (&value, attrlist, attrflags); } if (decl_spec_seq_has_spec_p (declspecs, ds_typedef) && TREE_TYPE (value) != error_mark_node && TYPE_NAME (TYPE_MAIN_VARIANT (TREE_TYPE (value))) != value) set_underlying_type (value); /* It's important that push_template_decl below follows set_underlying_type above so that the created template carries the properly set type of VALUE. */ if (processing_template_decl) value = push_template_decl (value); record_locally_defined_typedef (value); return value; } int friendp = decl_spec_seq_has_spec_p (declspecs, ds_friend); if (!friendp && DECL_IN_AGGR_P (value)) { error ("%qD is already defined in %qT", value, DECL_CONTEXT (value)); return void_type_node; } if (asmspec_tree && asmspec_tree != error_mark_node) asmspec = TREE_STRING_POINTER (asmspec_tree); if (init) { if (TREE_CODE (value) == FUNCTION_DECL) { if (init == ridpointers[(int)RID_DELETE]) { if (friendp && decl_defined_p (value)) { error ("redefinition of %q#D", value); inform (DECL_SOURCE_LOCATION (value), "%q#D previously defined here", value); } else { DECL_DELETED_FN (value) = 1; DECL_DECLARED_INLINE_P (value) = 1; DECL_INITIAL (value) = error_mark_node; } } else if (init == ridpointers[(int)RID_DEFAULT]) { if (defaultable_fn_check (value)) { DECL_DEFAULTED_FN (value) = 1; DECL_INITIALIZED_IN_CLASS_P (value) = 1; DECL_DECLARED_INLINE_P (value) = 1; } } else if (TREE_CODE (init) == DEFERRED_PARSE) error ("invalid initializer for member function %qD", value); else if (TREE_CODE (TREE_TYPE (value)) == METHOD_TYPE) { if (integer_zerop (init)) DECL_PURE_VIRTUAL_P (value) = 1; else if (error_operand_p (init)) ; /* An error has already been reported. 
*/ else error ("invalid initializer for member function %qD", value); } else { gcc_assert (TREE_CODE (TREE_TYPE (value)) == FUNCTION_TYPE); location_t iloc = cp_expr_loc_or_loc (init, DECL_SOURCE_LOCATION (value)); if (friendp) error_at (iloc, "initializer specified for friend " "function %qD", value); else error_at (iloc, "initializer specified for static " "member function %qD", value); } } else if (TREE_CODE (value) == FIELD_DECL) /* C++11 NSDMI, keep going. */; else if (!VAR_P (value)) gcc_unreachable (); } /* Pass friend decls back. */ if ((TREE_CODE (value) == FUNCTION_DECL || TREE_CODE (value) == TEMPLATE_DECL) && DECL_CONTEXT (value) != current_class_type) return value; /* Need to set this before push_template_decl. */ if (VAR_P (value)) DECL_CONTEXT (value) = current_class_type; if (processing_template_decl && VAR_OR_FUNCTION_DECL_P (value)) { value = push_template_decl (value); if (error_operand_p (value)) return error_mark_node; } if (attrlist) cplus_decl_attributes (&value, attrlist, 0); if (init && DIRECT_LIST_INIT_P (init)) flags = LOOKUP_NORMAL; else flags = LOOKUP_IMPLICIT; if (decl_spec_seq_has_spec_p (declspecs, ds_constinit)) flags |= LOOKUP_CONSTINIT; switch (TREE_CODE (value)) { case VAR_DECL: finish_static_data_member_decl (value, init, init_const_expr_p, asmspec_tree, flags); return value; case FIELD_DECL: if (asmspec) error ("%<asm%> specifiers are not permitted on non-static data members"); if (DECL_INITIAL (value) == error_mark_node) init = error_mark_node; cp_finish_decl (value, init, /*init_const_expr_p=*/false, NULL_TREE, flags); DECL_IN_AGGR_P (value) = 1; return value; case FUNCTION_DECL: if (asmspec) set_user_assembler_name (value, asmspec); cp_finish_decl (value, /*init=*/NULL_TREE, /*init_const_expr_p=*/false, asmspec_tree, flags); /* Pass friends back this way. 
*/ if (DECL_FRIEND_P (value)) return void_type_node; DECL_IN_AGGR_P (value) = 1; return value; default: gcc_unreachable (); } return NULL_TREE; } /* Like `grokfield', but for bitfields. WIDTH is the width of the bitfield, a constant expression. The other parameters are as for grokfield. */ tree grokbitfield (const cp_declarator *declarator, cp_decl_specifier_seq *declspecs, tree width, tree init, tree attrlist) { tree value = grokdeclarator (declarator, declspecs, BITFIELD, init != NULL_TREE, &attrlist); if (value == error_mark_node) return NULL_TREE; /* friends went bad. */ tree type = TREE_TYPE (value); if (type == error_mark_node) return value; /* Pass friendly classes back. */ if (VOID_TYPE_P (value)) return void_type_node; if (!INTEGRAL_OR_ENUMERATION_TYPE_P (type) && (INDIRECT_TYPE_P (type) || !dependent_type_p (type))) { error_at (DECL_SOURCE_LOCATION (value), "bit-field %qD with non-integral type %qT", value, type); return error_mark_node; } if (TREE_CODE (value) == TYPE_DECL) { error_at (DECL_SOURCE_LOCATION (value), "cannot declare %qD to be a bit-field type", value); return NULL_TREE; } /* Usually, finish_struct_1 catches bitfields with invalid types. But, in the case of bitfields with function type, we confuse ourselves into thinking they are member functions, so we must check here. 
*/ if (TREE_CODE (value) == FUNCTION_DECL) { error_at (DECL_SOURCE_LOCATION (value), "cannot declare bit-field %qD with function type", value); return NULL_TREE; } if (TYPE_WARN_IF_NOT_ALIGN (type)) { error_at (DECL_SOURCE_LOCATION (value), "cannot declare bit-field " "%qD with %<warn_if_not_aligned%> type", value); return NULL_TREE; } if (DECL_IN_AGGR_P (value)) { error ("%qD is already defined in the class %qT", value, DECL_CONTEXT (value)); return void_type_node; } if (TREE_STATIC (value)) { error_at (DECL_SOURCE_LOCATION (value), "static member %qD cannot be a bit-field", value); return NULL_TREE; } int flags = LOOKUP_IMPLICIT; if (init && DIRECT_LIST_INIT_P (init)) flags = LOOKUP_NORMAL; cp_finish_decl (value, init, false, NULL_TREE, flags); if (width != error_mark_node) { /* The width must be an integer type. */ if (!type_dependent_expression_p (width) && !INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P (TREE_TYPE (width))) error ("width of bit-field %qD has non-integral type %qT", value, TREE_TYPE (width)); else if (!check_for_bare_parameter_packs (width)) { /* Temporarily stash the width in DECL_BIT_FIELD_REPRESENTATIVE. check_bitfield_decl picks it from there later and sets DECL_SIZE accordingly. */ DECL_BIT_FIELD_REPRESENTATIVE (value) = width; SET_DECL_C_BIT_FIELD (value); } } DECL_IN_AGGR_P (value) = 1; if (attrlist) cplus_decl_attributes (&value, attrlist, /*flags=*/0); return value; } /* Returns true iff ATTR is an attribute which needs to be applied at instantiation time rather than template definition time. */ static bool is_late_template_attribute (tree attr, tree decl) { tree name = get_attribute_name (attr); tree args = TREE_VALUE (attr); const struct attribute_spec *spec = lookup_attribute_spec (name); tree arg; if (!spec) /* Unknown attribute. */ return false; /* Attribute weak handling wants to write out assembly right away. 
*/ if (is_attribute_p ("weak", name)) return true; /* Attributes used and unused are applied directly to typedefs for the benefit of maybe_warn_unused_local_typedefs. */ if (TREE_CODE (decl) == TYPE_DECL && (is_attribute_p ("unused", name) || is_attribute_p ("used", name))) return false; /* Attribute tls_model wants to modify the symtab. */ if (is_attribute_p ("tls_model", name)) return true; /* #pragma omp declare simd attribute needs to be always deferred. */ if (flag_openmp && is_attribute_p ("omp declare simd", name)) return true; /* An attribute pack is clearly dependent. */ if (args && PACK_EXPANSION_P (args)) return true; /* If any of the arguments are dependent expressions, we can't evaluate the attribute until instantiation time. */ for (arg = args; arg; arg = TREE_CHAIN (arg)) { tree t = TREE_VALUE (arg); /* If the first attribute argument is an identifier, only consider second and following arguments. Attributes like mode, format, cleanup and several target specific attributes aren't late just because they have an IDENTIFIER_NODE as first argument. */ if (arg == args && attribute_takes_identifier_p (name) && identifier_p (t)) continue; if (value_dependent_expression_p (t)) return true; } if (TREE_CODE (decl) == TYPE_DECL || TYPE_P (decl) || spec->type_required) { tree type = TYPE_P (decl) ? decl : TREE_TYPE (decl); /* We can't apply any attributes to a completely unknown type until instantiation time. */ enum tree_code code = TREE_CODE (type); if (code == TEMPLATE_TYPE_PARM || code == BOUND_TEMPLATE_TEMPLATE_PARM || code == TYPENAME_TYPE) return true; /* Also defer most attributes on dependent types. This is not necessary in all cases, but is the better default. */ else if (dependent_type_p (type) /* But some attributes specifically apply to templates. 
*/ && !is_attribute_p ("abi_tag", name) && !is_attribute_p ("deprecated", name) && !is_attribute_p ("visibility", name)) return true; else return false; } else return false; } /* ATTR_P is a list of attributes. Remove any attributes which need to be applied at instantiation time and return them. If IS_DEPENDENT is true, the declaration itself is dependent, so all attributes should be applied at instantiation time. */ tree splice_template_attributes (tree *attr_p, tree decl) { tree *p = attr_p; tree late_attrs = NULL_TREE; tree *q = &late_attrs; if (!p) return NULL_TREE; for (; *p; ) { if (is_late_template_attribute (*p, decl)) { ATTR_IS_DEPENDENT (*p) = 1; *q = *p; *p = TREE_CHAIN (*p); q = &TREE_CHAIN (*q); *q = NULL_TREE; } else p = &TREE_CHAIN (*p); } return late_attrs; } /* Remove any late attributes from the list in ATTR_P and attach them to DECL_P. */ static void save_template_attributes (tree *attr_p, tree *decl_p, int flags) { tree *q; if (attr_p && *attr_p == error_mark_node) return; tree late_attrs = splice_template_attributes (attr_p, *decl_p); if (!late_attrs) return; if (DECL_P (*decl_p)) q = &DECL_ATTRIBUTES (*decl_p); else q = &TYPE_ATTRIBUTES (*decl_p); tree old_attrs = *q; /* Merge the late attributes at the beginning with the attribute list. */ late_attrs = merge_attributes (late_attrs, *q); if (*q != late_attrs && !DECL_P (*decl_p) && !(flags & ATTR_FLAG_TYPE_IN_PLACE)) { if (!dependent_type_p (*decl_p)) *decl_p = cp_build_type_attribute_variant (*decl_p, late_attrs); else { *decl_p = build_variant_type_copy (*decl_p); TYPE_ATTRIBUTES (*decl_p) = late_attrs; } } else *q = late_attrs; if (!DECL_P (*decl_p) && *decl_p == TYPE_MAIN_VARIANT (*decl_p)) { /* We've added new attributes directly to the main variant, so now we need to update all of the other variants to include these new attributes. 
*/
      tree variant;
      for (variant = TYPE_NEXT_VARIANT (*decl_p); variant;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  /* Every variant still shares the pre-merge attribute list;
	     repoint them all at the updated list on the main variant.  */
	  gcc_assert (TYPE_ATTRIBUTES (variant) == old_attrs);
	  TYPE_ATTRIBUTES (variant) = TYPE_ATTRIBUTES (*decl_p);
	}
    }
}

/* True if ATTRS contains any dependent attributes that affect type
   identity.  */

bool
any_dependent_type_attributes_p (tree attrs)
{
  for (tree a = attrs; a; a = TREE_CHAIN (a))
    if (ATTR_IS_DEPENDENT (a))
      {
	const attribute_spec *as = lookup_attribute_spec (TREE_PURPOSE (a));
	if (as && as->affects_type_identity)
	  return true;
      }
  return false;
}

/* Return true iff ATTRS are acceptable attributes to be applied in-place
   to a typedef which gives a previously unnamed class or enum a name for
   linkage purposes.  */

bool
attributes_naming_typedef_ok (tree attrs)
{
  /* Only "vector_size" is rejected here; everything else is OK.  */
  for (; attrs; attrs = TREE_CHAIN (attrs))
    {
      tree name = get_attribute_name (attrs);
      if (is_attribute_p ("vector_size", name))
	return false;
    }
  return true;
}

/* Like reconstruct_complex_type, but handle also template trees.
   Rebuilds TYPE with its innermost component replaced by BOTTOM,
   recursing through pointer, reference, array, function, method and
   offset types and re-applying attributes and qualifiers at each
   level.  */

tree
cp_reconstruct_complex_type (tree type, tree bottom)
{
  tree inner, outer;

  if (TYPE_PTR_P (type))
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_pointer_type_for_mode (inner, TYPE_MODE (type),
					   TYPE_REF_CAN_ALIAS_ALL (type));
    }
  else if (TYPE_REF_P (type))
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_reference_type_for_mode (inner, TYPE_MODE (type),
					     TYPE_REF_CAN_ALIAS_ALL (type));
    }
  else if (TREE_CODE (type) == ARRAY_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_cplus_array_type (inner, TYPE_DOMAIN (type));
      /* Don't call cp_build_qualified_type on ARRAY_TYPEs, the
	 element type qualification will be handled by the recursive
	 cp_reconstruct_complex_type call and cp_build_qualified_type
	 for ARRAY_TYPEs changes the element type.
*/
      return outer;
    }
  else if (TREE_CODE (type) == FUNCTION_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_function_type (inner, TYPE_ARG_TYPES (type));
      outer = apply_memfn_quals (outer, type_memfn_quals (type));
    }
  else if (TREE_CODE (type) == METHOD_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      /* The build_method_type_directly() routine prepends 'this' to
	 argument list, so we must compensate by getting rid of it.  */
      outer
	= build_method_type_directly (class_of_this_parm (type),
				      inner,
				      TREE_CHAIN (TYPE_ARG_TYPES (type)));
    }
  else if (TREE_CODE (type) == OFFSET_TYPE)
    {
      inner = cp_reconstruct_complex_type (TREE_TYPE (type), bottom);
      outer = build_offset_type (TYPE_OFFSET_BASETYPE (type), inner);
    }
  else
    return bottom;

  if (TYPE_ATTRIBUTES (type))
    outer = cp_build_type_attribute_variant (outer, TYPE_ATTRIBUTES (type));
  outer = cp_build_qualified_type (outer, cp_type_quals (type));
  outer = cxx_copy_lang_qualifiers (outer, type);

  return outer;
}

/* Replaces any constexpr expression that may be in the attributes
   arguments with their reduced value.  */

void
cp_check_const_attributes (tree attributes)
{
  if (attributes == error_mark_node)
    return;

  tree attr;
  for (attr = attributes; attr; attr = TREE_CHAIN (attr))
    {
      tree arg;
      /* Walk the TREE_LIST of arguments, folding each expression value.  */
      for (arg = TREE_VALUE (attr); arg && TREE_CODE (arg) == TREE_LIST;
	   arg = TREE_CHAIN (arg))
	{
	  tree expr = TREE_VALUE (arg);
	  if (EXPR_P (expr))
	    TREE_VALUE (arg) = fold_non_dependent_expr (expr);
	}
    }
}

/* Return true if TYPE is an OpenMP mappable type.
   If NOTES is non-zero, emit a note message for each problem.  */

static bool
cp_omp_mappable_type_1 (tree type, bool notes)
{
  bool result = true;

  /* Mappable type has to be complete.  */
  if (type == error_mark_node || !COMPLETE_TYPE_P (type))
    {
      if (notes && type != error_mark_node)
	{
	  tree decl = TYPE_MAIN_DECL (type);
	  inform ((decl ?
		   DECL_SOURCE_LOCATION (decl) : input_location),
		  "incomplete type %qT is not mappable", type);
	}
      result = false;
    }
  /* Arrays have mappable type if the elements have mappable type.  */
  while (TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);
  /* A mappable type cannot contain virtual members.  */
  if (CLASS_TYPE_P (type) && CLASSTYPE_VTABLES (type))
    {
      if (notes)
	inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		"type %qT with virtual members is not mappable", type);
      result = false;
    }
  /* All data members must be non-static.  */
  if (CLASS_TYPE_P (type))
    {
      tree field;
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	if (VAR_P (field))
	  {
	    if (notes)
	      inform (DECL_SOURCE_LOCATION (field),
		      "static field %qD is not mappable", field);
	    result = false;
	  }
	/* All fields must have mappable types.  */
	else if (TREE_CODE (field) == FIELD_DECL
		 && !cp_omp_mappable_type_1 (TREE_TYPE (field), notes))
	  result = false;
    }
  return result;
}

/* Return true if TYPE is an OpenMP mappable type.  */

bool
cp_omp_mappable_type (tree type)
{
  return cp_omp_mappable_type_1 (type, false);
}

/* Return true if TYPE is an OpenMP mappable type.  Emit error
   messages (notes) if not.  */

bool
cp_omp_emit_unmappable_type_notes (tree type)
{
  return cp_omp_mappable_type_1 (type, true);
}

/* Return the last pushed declaration for the symbol DECL or NULL
   when no such declaration exists.  */

static tree
find_last_decl (tree decl)
{
  tree last_decl = NULL_TREE;

  if (tree name = DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE)
    {
      /* Look up the declaration in its scope.  */
      tree pushed_scope = NULL_TREE;
      if (tree ctype = DECL_CONTEXT (decl))
	pushed_scope = push_scope (ctype);

      last_decl = lookup_name (name);

      if (pushed_scope)
	pop_scope (pushed_scope);

      /* The declaration may be a member conversion operator
	 or a bunch of overloads (handle the latter below).  */
      if (last_decl && BASELINK_P (last_decl))
	last_decl = BASELINK_FUNCTIONS (last_decl);
    }

  if (!last_decl)
    return NULL_TREE;

  if (DECL_P (last_decl) || TREE_CODE (last_decl) == OVERLOAD)
    {
      /* A set of overloads of the same function.  */
      for (lkp_iterator iter (last_decl); iter; ++iter)
	{
	  if (TREE_CODE (*iter) == OVERLOAD)
	    continue;

	  if (decls_match (decl, *iter, /*record_decls=*/false))
	    return *iter;
	}
      return NULL_TREE;
    }

  return NULL_TREE;
}

/* Like decl_attributes, but handle C++ complexity.  */

void
cplus_decl_attributes (tree *decl, tree attributes, int flags)
{
  if (*decl == NULL_TREE || *decl == void_type_node
      || *decl == error_mark_node)
    return;

  /* Add implicit "omp declare target" attribute if requested.  */
  if (scope_chain->omp_declare_target_attribute
      && ((VAR_P (*decl)
	   && (TREE_STATIC (*decl) || DECL_EXTERNAL (*decl)))
	  || TREE_CODE (*decl) == FUNCTION_DECL))
    {
      if (VAR_P (*decl)
	  && DECL_CLASS_SCOPE_P (*decl))
	error ("%q+D static data member inside of declare target directive",
	       *decl);
      else if (VAR_P (*decl)
	       && (processing_template_decl
		   || !cp_omp_mappable_type (TREE_TYPE (*decl))))
	attributes
	  = tree_cons (get_identifier ("omp declare target implicit"),
		       NULL_TREE, attributes);
      else
	{
	  attributes = tree_cons (get_identifier ("omp declare target"),
				  NULL_TREE, attributes);
	  attributes
	    = tree_cons (get_identifier ("omp declare target block"),
			 NULL_TREE, attributes);
	}
    }

  if (processing_template_decl)
    {
      if (check_for_bare_parameter_packs (attributes))
	return;
      /* Dependent attributes are deferred to instantiation time.  */
      save_template_attributes (&attributes, decl, flags);
    }

  cp_check_const_attributes (attributes);

  if (TREE_CODE (*decl) == TEMPLATE_DECL)
    decl = &DECL_TEMPLATE_RESULT (*decl);

  if (TREE_TYPE (*decl) && TYPE_PTRMEMFUNC_P (TREE_TYPE (*decl)))
    {
      attributes
	= decl_attributes (decl, attributes, flags | ATTR_FLAG_FUNCTION_NEXT);
      decl_attributes (&TYPE_PTRMEMFUNC_FN_TYPE_RAW (TREE_TYPE (*decl)),
		       attributes, flags);
    }
  else
    {
      tree last_decl = find_last_decl (*decl);
      decl_attributes (decl, attributes, flags, last_decl);
    }

  if
     (TREE_CODE (*decl) == TYPE_DECL)
    SET_IDENTIFIER_TYPE_VALUE (DECL_NAME (*decl), TREE_TYPE (*decl));

  /* Propagate deprecation out to the template.  */
  if (TREE_DEPRECATED (*decl))
    if (tree ti = get_template_info (*decl))
      {
	tree tmpl = TI_TEMPLATE (ti);
	tree pattern = (TYPE_P (*decl) ? TREE_TYPE (tmpl)
			: DECL_TEMPLATE_RESULT (tmpl));
	if (*decl == pattern)
	  TREE_DEPRECATED (tmpl) = true;
      }
}

/* Walks through the namespace- or function-scope anonymous union
   OBJECT, with the indicated TYPE, building appropriate VAR_DECLs.
   Returns one of the fields for use in the mangled name.  */

static tree
build_anon_union_vars (tree type, tree object)
{
  tree main_decl = NULL_TREE;
  tree field;

  /* Rather than write the code to handle the non-union case,
     just give an error.  */
  if (TREE_CODE (type) != UNION_TYPE)
    {
      error_at (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		"anonymous struct not inside named type");
      return error_mark_node;
    }

  for (field = TYPE_FIELDS (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      tree decl;
      tree ref;

      if (DECL_ARTIFICIAL (field))
	continue;
      if (TREE_CODE (field) != FIELD_DECL)
	{
	  permerror (DECL_SOURCE_LOCATION (field),
		     "%q#D invalid; an anonymous union can only "
		     "have non-static data members", field);
	  continue;
	}

      if (TREE_PRIVATE (field))
	permerror (DECL_SOURCE_LOCATION (field),
		   "private member %q#D in anonymous union", field);
      else if (TREE_PROTECTED (field))
	permerror (DECL_SOURCE_LOCATION (field),
		   "protected member %q#D in anonymous union", field);

      if (processing_template_decl)
	ref = build_min_nt_loc (UNKNOWN_LOCATION, COMPONENT_REF, object,
				DECL_NAME (field), NULL_TREE);
      else
	ref = build_class_member_access_expr (object, field, NULL_TREE,
					      false, tf_warning_or_error);

      if (DECL_NAME (field))
	{
	  tree base;

	  /* Build a VAR_DECL aliasing the member; its value is the
	     member access REF, attached via DECL_VALUE_EXPR below.  */
	  decl = build_decl (input_location,
			     VAR_DECL, DECL_NAME (field), TREE_TYPE (field));
	  DECL_ANON_UNION_VAR_P (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;

	  /* The linkage of the alias follows the underlying object.  */
	  base = get_base_address (object);
	  TREE_PUBLIC (decl) = TREE_PUBLIC (base);
	  TREE_STATIC (decl) = TREE_STATIC (base);
	  DECL_EXTERNAL (decl) = DECL_EXTERNAL (base);

	  SET_DECL_VALUE_EXPR (decl, ref);
	  DECL_HAS_VALUE_EXPR_P (decl) = 1;

	  decl = pushdecl (decl);
	}
      else if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
	decl = build_anon_union_vars (TREE_TYPE (field), ref);
      else
	decl = 0;

      if (main_decl == NULL_TREE)
	main_decl = decl;
    }

  return main_decl;
}

/* Finish off the processing of a UNION_TYPE structure.  If the union is an
   anonymous union, then all members must be laid out together.  PUBLIC_P
   is nonzero if this union is not declared static.  */

void
finish_anon_union (tree anon_union_decl)
{
  tree type;
  tree main_decl;
  bool public_p;

  if (anon_union_decl == error_mark_node)
    return;

  type = TREE_TYPE (anon_union_decl);
  public_p = TREE_PUBLIC (anon_union_decl);

  /* The VAR_DECL's context is the same as the TYPE's context.  */
  DECL_CONTEXT (anon_union_decl) = DECL_CONTEXT (TYPE_NAME (type));

  if (TYPE_FIELDS (type) == NULL_TREE)
    return;

  if (public_p)
    {
      error ("namespace-scope anonymous aggregates must be static");
      return;
    }

  main_decl = build_anon_union_vars (type, anon_union_decl);
  if (main_decl == error_mark_node)
    return;
  if (main_decl == NULL_TREE)
    {
      pedwarn (input_location, 0, "anonymous union with no members");
      return;
    }

  if (!processing_template_decl)
    {
      /* Use main_decl to set the mangled name.  */
      DECL_NAME (anon_union_decl) = DECL_NAME (main_decl);
      maybe_commonize_var (anon_union_decl);
      if (TREE_STATIC (anon_union_decl) || DECL_EXTERNAL (anon_union_decl))
	{
	  if (DECL_DISCRIMINATOR_P (anon_union_decl))
	    determine_local_discriminator (anon_union_decl);
	  mangle_decl (anon_union_decl);
	}
      DECL_NAME (anon_union_decl) = NULL_TREE;
    }

  pushdecl (anon_union_decl);
  cp_finish_decl (anon_union_decl, NULL_TREE, false, NULL_TREE, 0);
}

/* Auxiliary functions to make type signatures for
   `operator new' and `operator delete' correspond to
   what compiler will be expecting.
*/

tree
coerce_new_type (tree type, location_t loc)
{
  int e = 0;
  tree args = TYPE_ARG_TYPES (type);

  gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);

  /* E encodes what needs fixing: 1 = wrong return type,
     2 = wrong (or missing) first parameter.  */
  if (!same_type_p (TREE_TYPE (type), ptr_type_node))
    {
      e = 1;
      error_at (loc, "%<operator new%> must return type %qT",
		ptr_type_node);
    }

  if (args && args != void_list_node)
    {
      if (TREE_PURPOSE (args))
	{
	  /* [basic.stc.dynamic.allocation]

	     The first parameter shall not have an associated default
	     argument.  */
	  error_at (loc, "the first parameter of %<operator new%> cannot "
		    "have a default argument");
	  /* Throw away the default argument.  */
	  TREE_PURPOSE (args) = NULL_TREE;
	}

      if (!same_type_p (TREE_VALUE (args), size_type_node))
	{
	  e = 2;
	  args = TREE_CHAIN (args);
	}
    }
  else
    e = 2;

  if (e == 2)
    permerror (loc, "%<operator new%> takes type %<size_t%> (%qT) "
	       "as first parameter", size_type_node);

  switch (e)
    {
      case 2:
	args = tree_cons (NULL_TREE, size_type_node, args);
	/* Fall through.  */
      case 1:
	type = (cxx_copy_lang_qualifiers
		(build_function_type (ptr_type_node, args),
		 type));
	/* Fall through.  */
      default:;
    }
  return type;
}

void
coerce_delete_type (tree decl, location_t loc)
{
  int e = 0;
  tree type = TREE_TYPE (decl);
  tree args = TYPE_ARG_TYPES (type);

  gcc_assert (TREE_CODE (type) == FUNCTION_TYPE);

  if (!same_type_p (TREE_TYPE (type), void_type_node))
    {
      e = 1;
      error_at (loc, "%<operator delete%> must return type %qT",
		void_type_node);
    }

  tree ptrtype = ptr_type_node;
  if (destroying_delete_p (decl))
    {
      if (DECL_CLASS_SCOPE_P (decl))
	/* If the function is a destroying operator delete declared in
	   class type C, the type of its first parameter shall be C*.  */
	ptrtype = build_pointer_type (DECL_CONTEXT (decl));
      else
	/* A destroying operator delete shall be a class member
	   function named operator delete.  */
	error_at (loc,
		  "destroying %<operator delete%> must be a member function");
      const ovl_op_info_t *op = IDENTIFIER_OVL_OP_INFO (DECL_NAME (decl));
      if (op->flags & OVL_OP_FLAG_VEC)
	error_at (loc, "%<operator delete[]%> cannot be a destroying delete");
      if (!usual_deallocation_fn_p (decl))
	error_at (loc, "destroying %<operator delete%> must be a usual "
		  "deallocation function");
    }

  if (!args || args == void_list_node
      || !same_type_p (TREE_VALUE (args), ptrtype))
    {
      e = 2;
      if (args && args != void_list_node)
	args = TREE_CHAIN (args);
      error_at (loc, "%<operator delete%> takes type %qT as first parameter",
		ptrtype);
    }
  switch (e)
    {
      case 2:
	args = tree_cons (NULL_TREE, ptrtype, args);
	/* Fall through.  */
      case 1:
	type = (cxx_copy_lang_qualifiers
		(build_function_type (void_type_node, args),
		 type));
	/* Fall through.  */
      default:;
    }

  TREE_TYPE (decl) = type;
}

/* DECL is a VAR_DECL for a vtable: walk through the entries in the vtable
   and mark them as needed.  */

static void
mark_vtable_entries (tree decl)
{
  tree fnaddr;
  unsigned HOST_WIDE_INT idx;

  /* It's OK for the vtable to refer to deprecated virtual functions.  */
  warning_sentinel w(warn_deprecated_decl);

  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (DECL_INITIAL (decl)),
			      idx, fnaddr)
    {
      tree fn;

      STRIP_NOPS (fnaddr);

      if (TREE_CODE (fnaddr) != ADDR_EXPR
	  && TREE_CODE (fnaddr) != FDESC_EXPR)
	/* This entry is an offset: a virtual base class offset, a
	   virtual call offset, an RTTI offset, etc.  */
	continue;

      fn = TREE_OPERAND (fnaddr, 0);
      TREE_ADDRESSABLE (fn) = 1;
      /* When we don't have vcall offsets, we output thunks whenever
	 we output the vtables that contain them.  With vcall offsets,
	 we know all the thunks we'll need when we emit a virtual
	 function, so we emit the thunks there instead.  */
      if (DECL_THUNK_P (fn))
	use_thunk (fn, /*emit_p=*/0);
      /* Set the location, as marking the function could cause
	 instantiation.  We do not need to preserve the incoming
	 location, as we're called from c_parse_final_cleanups, which
	 takes care of that.
*/
      input_location = DECL_SOURCE_LOCATION (fn);
      mark_used (fn);
    }
}

/* Adjust the TLS model on variable DECL if need be, typically after
   the linkage of DECL has been modified.  */

static void
adjust_var_decl_tls_model (tree decl)
{
  if (CP_DECL_THREAD_LOCAL_P (decl)
      && !lookup_attribute ("tls_model", DECL_ATTRIBUTES (decl)))
    set_decl_tls_model (decl, decl_default_tls_model (decl));
}

/* Set DECL up to have the closest approximation of "initialized common"
   linkage available.  */

void
comdat_linkage (tree decl)
{
  if (flag_weak)
    make_decl_one_only (decl, cxx_comdat_group (decl));
  else if (TREE_CODE (decl) == FUNCTION_DECL
	   || (VAR_P (decl) && DECL_ARTIFICIAL (decl)))
    /* We can just emit function and compiler-generated variables
       statically; having multiple copies is (for the most part) only
       a waste of space.

       There are two correctness issues, however: the address of a
       template instantiation with external linkage should be the
       same, independent of what translation unit asks for the
       address, and this will not hold when we emit multiple copies of
       the function.  However, there's little else we can do.

       Also, by default, the typeinfo implementation assumes that
       there will be only one copy of the string used as the name for
       each type.  Therefore, if weak symbols are unavailable, the
       run-time library should perform a more conservative check; it
       should perform a string comparison, rather than an address
       comparison.  */
    TREE_PUBLIC (decl) = 0;
  else
    {
      /* Static data member template instantiations, however, cannot
	 have multiple copies.  */
      if (DECL_INITIAL (decl) == 0
	  || DECL_INITIAL (decl) == error_mark_node)
	DECL_COMMON (decl) = 1;
      else if (EMPTY_CONSTRUCTOR_P (DECL_INITIAL (decl)))
	{
	  DECL_COMMON (decl) = 1;
	  DECL_INITIAL (decl) = error_mark_node;
	}
      else if (!DECL_EXPLICIT_INSTANTIATION (decl))
	{
	  /* We can't do anything useful; leave vars for explicit
	     instantiation.  */
	  DECL_EXTERNAL (decl) = 1;
	  DECL_NOT_REALLY_EXTERN (decl) = 0;
	}
    }

  if (TREE_PUBLIC (decl))
    DECL_COMDAT (decl) = 1;

  if (VAR_P (decl))
    adjust_var_decl_tls_model (decl);
}

/* For win32 we also want to put explicit instantiations in
   linkonce sections, so that they will be merged with implicit
   instantiations; otherwise we get duplicate symbol errors.
   For Darwin we do not want explicit instantiations to be
   linkonce.  */

void
maybe_make_one_only (tree decl)
{
  /* We used to say that this was not necessary on targets that support weak
     symbols, because the implicit instantiations will defer to the explicit
     one.  However, that's not actually the case in SVR4; a strong definition
     after a weak one is an error.  Also, not making explicit
     instantiations one_only means that we can end up with two copies of
     some template instantiations.  */
  if (! flag_weak)
    return;

  /* We can't set DECL_COMDAT on functions, or cp_finish_file will think
     we can get away with not emitting them if they aren't used.  We need
     to for variables so that cp_finish_decl will update their linkage,
     because their DECL_INITIAL may not have been set properly yet.  */

  if (!TARGET_WEAK_NOT_IN_ARCHIVE_TOC
      || (! DECL_EXPLICIT_INSTANTIATION (decl)
	  && ! DECL_TEMPLATE_SPECIALIZATION (decl)))
    {
      make_decl_one_only (decl, cxx_comdat_group (decl));

      if (VAR_P (decl))
	{
	  varpool_node *node = varpool_node::get_create (decl);
	  DECL_COMDAT (decl) = 1;
	  /* Mark it needed so we don't forget to emit it.  */
	  node->forced_by_abi = true;
	  TREE_USED (decl) = 1;

	  adjust_var_decl_tls_model (decl);
	}
    }
}

/* Returns true iff DECL, a FUNCTION_DECL or VAR_DECL, has vague linkage.
   This predicate will give the right answer during parsing of the
   function, which other tests may not.  */

bool
vague_linkage_p (tree decl)
{
  if (!TREE_PUBLIC (decl))
    {
      /* maybe_thunk_body clears TREE_PUBLIC and DECL_ABSTRACT_P on the
	 maybe-in-charge 'tor variants; in that case we need to check one
	 of the "clones" for the real linkage.  But only in that case;
	 before maybe_clone_body we haven't yet copied the linkage to
	 the clones.  */
      if (DECL_MAYBE_IN_CHARGE_CDTOR_P (decl)
	  && !DECL_ABSTRACT_P (decl)
	  && DECL_CHAIN (decl)
	  && DECL_CLONED_FUNCTION_P (DECL_CHAIN (decl)))
	return vague_linkage_p (DECL_CHAIN (decl));

      gcc_checking_assert (!DECL_COMDAT (decl));
      return false;
    }
  /* Unfortunately, import_export_decl has not always been called
     before the function is processed, so we cannot simply check
     DECL_COMDAT.  */
  if (DECL_COMDAT (decl)
      || (TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_DECLARED_INLINE_P (decl))
      || (DECL_LANG_SPECIFIC (decl)
	  && DECL_TEMPLATE_INSTANTIATION (decl))
      || (VAR_P (decl) && DECL_INLINE_VAR_P (decl)))
    return true;
  else if (DECL_FUNCTION_SCOPE_P (decl))
    /* A local static in an inline effectively has vague linkage.  */
    return (TREE_STATIC (decl)
	    && vague_linkage_p (DECL_CONTEXT (decl)));
  else
    return false;
}

/* Determine whether or not we want to specifically import or export CTYPE,
   using various heuristics.  */

static void
import_export_class (tree ctype)
{
  /* -1 for imported, 1 for exported.  */
  int import_export = 0;

  /* It only makes sense to call this function at EOF.  The reason is
     that this function looks at whether or not the first non-inline
     non-abstract virtual member function has been defined in this
     translation unit.  But, we can't possibly know that until we've
     seen the entire translation unit.  */
  gcc_assert (at_eof);

  if (CLASSTYPE_INTERFACE_KNOWN (ctype))
    return;

  /* If MULTIPLE_SYMBOL_SPACES is set and we saw a #pragma interface,
     we will have CLASSTYPE_INTERFACE_ONLY set but not
     CLASSTYPE_INTERFACE_KNOWN.  In that case, we don't want to use this
     heuristic because someone will supply a #pragma implementation
     elsewhere, and deducing it here would produce a conflict.  */
  if (CLASSTYPE_INTERFACE_ONLY (ctype))
    return;

  if (lookup_attribute ("dllimport", TYPE_ATTRIBUTES (ctype)))
    import_export = -1;
  else if (lookup_attribute ("dllexport", TYPE_ATTRIBUTES (ctype)))
    import_export = 1;
  else if (CLASSTYPE_IMPLICIT_INSTANTIATION (ctype)
	   && !flag_implicit_templates)
    /* For a template class, without -fimplicit-templates, check the
       repository.  If the virtual table is assigned to this
       translation unit, then export the class; otherwise, import
       it.  */
    import_export = -1;
  else if (TYPE_POLYMORPHIC_P (ctype))
    {
      /* The ABI specifies that the virtual table and associated
	 information are emitted with the key method, if any.  */
      tree method = CLASSTYPE_KEY_METHOD (ctype);
      /* If weak symbol support is not available, then we must be
	 careful not to emit the vtable when the key function is
	 inline.  An inline function can be defined in multiple
	 translation units.  If we were to emit the vtable in each
	 translation unit containing a definition, we would get
	 multiple definition errors at link-time.  */
      if (method && (flag_weak || ! DECL_DECLARED_INLINE_P (method)))
	import_export = (DECL_REALLY_EXTERN (method) ? -1 : 1);
    }

  /* When MULTIPLE_SYMBOL_SPACES is set, we cannot count on seeing a
     definition anywhere else.  */
  if (MULTIPLE_SYMBOL_SPACES && import_export == -1)
    import_export = 0;

  /* Allow back ends the chance to overrule the decision.  */
  if (targetm.cxx.import_export_class)
    import_export = targetm.cxx.import_export_class (ctype, import_export);

  if (import_export)
    {
      SET_CLASSTYPE_INTERFACE_KNOWN (ctype);
      CLASSTYPE_INTERFACE_ONLY (ctype) = (import_export < 0);
    }
}

/* Return true if VAR has already been provided to the back end; in that
   case VAR should not be modified further by the front end.  */

static bool
var_finalized_p (tree var)
{
  return varpool_node::get_create (var)->definition;
}

/* DECL is a VAR_DECL or FUNCTION_DECL which, for whatever reason,
   must be emitted in this translation unit.  Mark it as such.
*/

void
mark_needed (tree decl)
{
  TREE_USED (decl) = 1;
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      /* Extern inline functions don't become needed when referenced.
	 If we know a method will be emitted in other TU and no new
	 functions can be marked reachable, just use the external
	 definition.  */
      struct cgraph_node *node = cgraph_node::get_create (decl);
      node->forced_by_abi = true;

      /* #pragma interface can call mark_needed for
	  maybe-in-charge 'tors; mark the clones as well.  */
      tree clone;
      FOR_EACH_CLONE (clone, decl)
	mark_needed (clone);
    }
  else if (VAR_P (decl))
    {
      varpool_node *node = varpool_node::get_create (decl);
      /* C++ frontend use mark_decl_references to force COMDAT variables
	 to be output that might appear dead otherwise.  */
      node->forced_by_abi = true;
    }
}

/* DECL is either a FUNCTION_DECL or a VAR_DECL.  This function
   returns true if a definition of this entity should be provided in
   this object file.  Callers use this function to determine whether
   or not to let the back end know that a definition of DECL is
   available in this translation unit.  */

bool
decl_needed_p (tree decl)
{
  gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
  /* This function should only be called at the end of the translation
     unit.  We cannot be sure of whether or not something will be
     COMDAT until that point.  */
  gcc_assert (at_eof);

  /* All entities with external linkage that are not COMDAT/EXTERN should be
     emitted; they may be referred to from other object files.  */
  if (TREE_PUBLIC (decl) && !DECL_COMDAT (decl) && !DECL_REALLY_EXTERN (decl))
    return true;

  /* Functions marked "dllexport" must be emitted so that they are
     visible to other DLLs.  */
  if (flag_keep_inline_dllexport
      && lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)))
    return true;

  /* When not optimizing, do not bother to produce definitions for extern
     symbols.  */
  /* NOTE(review): lookup_attribute is normally passed an attribute list,
     i.e. DECL_ATTRIBUTES (decl), not the decl itself -- confirm the
     argument below is as intended.  */
  if (DECL_REALLY_EXTERN (decl)
      && ((TREE_CODE (decl) != FUNCTION_DECL
	   && !optimize)
	  || (TREE_CODE (decl) == FUNCTION_DECL
	      && !opt_for_fn (decl, optimize)))
      && !lookup_attribute ("always_inline", decl))
    return false;

  /* If this entity was used, let the back end see it; it will decide
     whether or not to emit it into the object file.  */
  if (TREE_USED (decl))
    return true;

  /* Virtual functions might be needed for devirtualization.  */
  if (flag_devirtualize
      && TREE_CODE (decl) == FUNCTION_DECL
      && DECL_VIRTUAL_P (decl))
    return true;

  /* Otherwise, DECL does not need to be emitted -- yet.  A subsequent
     reference to DECL might cause it to be emitted later.  */
  return false;
}

/* If necessary, write out the vtables for the dynamic class CTYPE.
   Returns true if any vtables were emitted.  */

static bool
maybe_emit_vtables (tree ctype)
{
  tree vtbl;
  tree primary_vtbl;
  int needed = 0;
  varpool_node *current = NULL, *last = NULL;

  /* If the vtables for this class have already been emitted there is
     nothing more to do.  */
  primary_vtbl = CLASSTYPE_VTABLES (ctype);
  if (var_finalized_p (primary_vtbl))
    return false;
  /* Ignore dummy vtables made by get_vtable_decl.  */
  if (TREE_TYPE (primary_vtbl) == void_type_node)
    return false;

  /* On some targets, we cannot determine the key method until the end
     of the translation unit -- which is when this function is
     called.  */
  if (!targetm.cxx.key_method_may_be_inline ())
    determine_key_method (ctype);

  /* See if any of the vtables are needed.  */
  for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl))
    {
      import_export_decl (vtbl);
      if (DECL_NOT_REALLY_EXTERN (vtbl) && decl_needed_p (vtbl))
	needed = 1;
    }
  if (!needed)
    {
      /* If the references to this class' vtables are optimized away,
	 still emit the appropriate debugging information.  See
	 dfs_debug_mark.  */
      if (DECL_COMDAT (primary_vtbl)
	  && CLASSTYPE_DEBUG_REQUESTED (ctype))
	note_debug_info_needed (ctype);
      return false;
    }

  /* The ABI requires that we emit all of the vtables if we emit any
     of them.
*/
  for (vtbl = CLASSTYPE_VTABLES (ctype); vtbl; vtbl = DECL_CHAIN (vtbl))
    {
      /* Mark entities references from the virtual table as used.  */
      mark_vtable_entries (vtbl);

      if (TREE_TYPE (DECL_INITIAL (vtbl)) == 0)
	{
	  vec<tree, va_gc> *cleanups = NULL;
	  tree expr = store_init_value (vtbl, DECL_INITIAL (vtbl), &cleanups,
					LOOKUP_NORMAL);

	  /* It had better be all done at compile-time.  */
	  gcc_assert (!expr && !cleanups);
	}

      /* Write it out.  */
      DECL_EXTERNAL (vtbl) = 0;
      rest_of_decl_compilation (vtbl, 1, 1);

      /* Because we're only doing syntax-checking, we'll never end up
	 actually marking the variable as written.  */
      if (flag_syntax_only)
	TREE_ASM_WRITTEN (vtbl) = 1;
      else if (DECL_ONE_ONLY (vtbl))
	{
	  current = varpool_node::get_create (vtbl);
	  if (last)
	    current->add_to_same_comdat_group (last);
	  last = current;
	}
    }

  /* For abstract classes, the destructor has been removed from the
     vtable (in class.c's build_vtbl_initializer).  For a compiler-
     generated destructor, it hence might not have been generated in
     this translation unit - and with '#pragma interface' it might
     never get generated.  */
  if (CLASSTYPE_PURE_VIRTUALS (ctype)
      && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (ctype)
      && !CLASSTYPE_LAZY_DESTRUCTOR (ctype)
      && DECL_DEFAULTED_IN_CLASS_P (CLASSTYPE_DESTRUCTOR (ctype)))
    note_vague_linkage_fn (CLASSTYPE_DESTRUCTOR (ctype));

  /* Since we're writing out the vtable here, also write the debug
     info.  */
  note_debug_info_needed (ctype);

  return true;
}

/* A special return value from type_visibility meaning internal
   linkage.  */

enum { VISIBILITY_ANON = VISIBILITY_INTERNAL+1 };

static int expr_visibility (tree);
static int type_visibility (tree);

/* walk_tree helper function for type_visibility.  */

static tree
min_vis_r (tree *tp, int *walk_subtrees, void *data)
{
  int *vis_p = (int *)data;
  if (!
TYPE_P (*tp))
    {
      /* Only types contribute to visibility here; don't descend into
	 non-type operands.  */
      *walk_subtrees = 0;
    }
  else if (OVERLOAD_TYPE_P (*tp)
	   && !TREE_PUBLIC (TYPE_MAIN_DECL (*tp)))
    {
      /* Anonymous-namespace type: force internal linkage and stop.  */
      *vis_p = VISIBILITY_ANON;
      return *tp;
    }
  else if (CLASS_TYPE_P (*tp)
	   && CLASSTYPE_VISIBILITY (*tp) > *vis_p)
    *vis_p = CLASSTYPE_VISIBILITY (*tp);
  else if (TREE_CODE (*tp) == ARRAY_TYPE
	   && uses_template_parms (TYPE_DOMAIN (*tp)))
    {
      /* A dependent array bound can name entities; fold in the
	 visibility of the bound expression.  */
      int evis = expr_visibility (TYPE_MAX_VALUE (TYPE_DOMAIN (*tp)));
      if (evis > *vis_p)
	*vis_p = evis;
    }
  return NULL;
}

/* walk_tree helper function for expr_visibility.  */

static tree
min_vis_expr_r (tree *tp, int */*walk_subtrees*/, void *data)
{
  int *vis_p = (int *)data;
  int tpvis = VISIBILITY_DEFAULT;

  switch (TREE_CODE (*tp))
    {
    case CAST_EXPR:
    case IMPLICIT_CONV_EXPR:
    case STATIC_CAST_EXPR:
    case REINTERPRET_CAST_EXPR:
    case CONST_CAST_EXPR:
    case DYNAMIC_CAST_EXPR:
    case NEW_EXPR:
    case CONSTRUCTOR:
    case LAMBDA_EXPR:
      tpvis = type_visibility (TREE_TYPE (*tp));
      break;

    case VAR_DECL:
    case FUNCTION_DECL:
      if (! TREE_PUBLIC (*tp))
	tpvis = VISIBILITY_ANON;
      else
	tpvis = DECL_VISIBILITY (*tp);
      break;

    default:
      break;
    }

  if (tpvis > *vis_p)
    *vis_p = tpvis;

  return NULL_TREE;
}

/* Returns the visibility of TYPE, which is the minimum visibility of its
   component types.  */

static int
type_visibility (tree type)
{
  int vis = VISIBILITY_DEFAULT;
  cp_walk_tree_without_duplicates (&type, min_vis_r, &vis);
  return vis;
}

/* Returns the visibility of an expression EXPR that appears in the
   signature of a function template, which is the minimum visibility of
   names that appear in its mangling.  */

static int
expr_visibility (tree expr)
{
  int vis = VISIBILITY_DEFAULT;
  cp_walk_tree_without_duplicates (&expr, min_vis_expr_r, &vis);
  return vis;
}

/* Limit the visibility of DECL to VISIBILITY, if not explicitly
   specified (or if VISIBILITY is static).  If TMPL is true, this
   constraint is for a template argument, and takes precedence
   over explicitly-specified visibility on the template.  */

static void
constrain_visibility (tree decl, int visibility, bool tmpl)
{
  if (visibility == VISIBILITY_ANON)
    {
      /* extern "C" declarations aren't affected by the anonymous
	 namespace.  */
      if (!DECL_EXTERN_C_P (decl))
	{
	  /* Force internal linkage: strip all vague-linkage flags and
	     any COMDAT group membership.  */
	  TREE_PUBLIC (decl) = 0;
	  DECL_WEAK (decl) = 0;
	  DECL_COMMON (decl) = 0;
	  DECL_COMDAT (decl) = false;
	  if (VAR_OR_FUNCTION_DECL_P (decl))
	    {
	      struct symtab_node *snode = symtab_node::get (decl);

	      if (snode)
		snode->set_comdat_group (NULL);
	    }
	  DECL_INTERFACE_KNOWN (decl) = 1;
	  if (DECL_LANG_SPECIFIC (decl))
	    DECL_NOT_REALLY_EXTERN (decl) = 1;
	}
    }
  else if (visibility > DECL_VISIBILITY (decl)
	   && (tmpl || !DECL_VISIBILITY_SPECIFIED (decl)))
    {
      DECL_VISIBILITY (decl) = (enum symbol_visibility) visibility;
      /* This visibility was not specified.  */
      DECL_VISIBILITY_SPECIFIED (decl) = false;
    }
}

/* Constrain the visibility of DECL based on the visibility of its template
   arguments.  */

static void
constrain_visibility_for_template (tree decl, tree targs)
{
  /* If this is a template instantiation, check the innermost
     template args for visibility constraints.  The outer template
     args are covered by the class check.  */
  tree args = INNERMOST_TEMPLATE_ARGS (targs);
  int i;
  for (i = TREE_VEC_LENGTH (args); i > 0; --i)
    {
      int vis = 0;

      tree arg = TREE_VEC_ELT (args, i-1);
      if (TYPE_P (arg))
	vis = type_visibility (arg);
      else
	vis = expr_visibility (arg);
      if (vis)
	constrain_visibility (decl, vis, true);
    }
}

/* Like c_determine_visibility, but with additional C++-specific
   behavior.

   Function-scope entities can rely on the function's visibility because
   it is set in start_preparsed_function.

   Class-scope entities cannot rely on the class's visibility until the
   end of the enclosing class definition.

   Note that because namespaces have multiple independent definitions,
   namespace visibility is handled elsewhere using the #pragma visibility
   machinery rather than by decorating the namespace declaration.
   The goal is for constraints from the type to give a diagnostic, and
   other constraints to be applied silently.  */

void
determine_visibility (tree decl)
{
  /* Remember that all decls get VISIBILITY_DEFAULT when built.  */

  /* Only relevant for names with external linkage.  */
  if (!TREE_PUBLIC (decl))
    return;

  /* Cloned constructors and destructors get the same visibility as
     the underlying function.  That should be set up in
     maybe_clone_body.  */
  gcc_assert (!DECL_CLONED_FUNCTION_P (decl));

  /* Snapshot the incoming visibility so we can tell at the end of
     this function whether anything changed and the RTL symbol flags
     need refreshing.  */
  bool orig_visibility_specified = DECL_VISIBILITY_SPECIFIED (decl);
  enum symbol_visibility orig_visibility = DECL_VISIBILITY (decl);

  /* The decl may be a template instantiation, which could influence
     visibility.  */
  tree template_decl = NULL_TREE;
  if (TREE_CODE (decl) == TYPE_DECL)
    {
      if (CLASS_TYPE_P (TREE_TYPE (decl)))
	{
	  if (CLASSTYPE_USE_TEMPLATE (TREE_TYPE (decl)))
	    template_decl = decl;
	}
      else if (TYPE_TEMPLATE_INFO (TREE_TYPE (decl)))
	template_decl = decl;
    }
  else if (DECL_LANG_SPECIFIC (decl) && DECL_USE_TEMPLATE (decl))
    template_decl = decl;

  if (TREE_CODE (decl) == TYPE_DECL
      && LAMBDA_TYPE_P (TREE_TYPE (decl))
      && CLASSTYPE_LAMBDA_EXPR (TREE_TYPE (decl)) != error_mark_node)
    if (tree extra = LAMBDA_TYPE_EXTRA_SCOPE (TREE_TYPE (decl)))
      {
	/* The lambda's visibility is limited by that of its extra
	   scope.  */
	int vis = 0;
	if (TYPE_P (extra))
	  vis = type_visibility (extra);
	else
	  vis = expr_visibility (extra);
	constrain_visibility (decl, vis, false);
      }

  /* If DECL is a member of a class, visibility specifiers on the
     class can influence the visibility of the DECL.  */
  tree class_type = NULL_TREE;
  if (DECL_CLASS_SCOPE_P (decl))
    class_type = DECL_CONTEXT (decl);
  else
    {
      /* Not a class member.  */

      /* Virtual tables have DECL_CONTEXT set to their associated class,
	 so they are automatically handled above.  */
      gcc_assert (!VAR_P (decl)
		  || !DECL_VTABLE_OR_VTT_P (decl));

      if (DECL_FUNCTION_SCOPE_P (decl) && !DECL_VISIBILITY_SPECIFIED (decl))
	{
	  /* Local statics and classes get the visibility of their
	     containing function by default, except that
	     -fvisibility-inlines-hidden doesn't affect them.  */
	  tree fn = DECL_CONTEXT (decl);
	  if (DECL_VISIBILITY_SPECIFIED (fn))
	    {
	      DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
	      DECL_VISIBILITY_SPECIFIED (decl)
		= DECL_VISIBILITY_SPECIFIED (fn);
	    }
	  else
	    {
	      if (DECL_CLASS_SCOPE_P (fn))
		determine_visibility_from_class (decl, DECL_CONTEXT (fn));
	      else if (determine_hidden_inline (fn))
		{
		  /* The function is only hidden because of
		     -fvisibility-inlines-hidden; don't propagate that
		     to the local entity, fall back to the default.  */
		  DECL_VISIBILITY (decl) = default_visibility;
		  DECL_VISIBILITY_SPECIFIED (decl)
		    = visibility_options.inpragma;
		}
	      else
		{
		  DECL_VISIBILITY (decl) = DECL_VISIBILITY (fn);
		  DECL_VISIBILITY_SPECIFIED (decl)
		    = DECL_VISIBILITY_SPECIFIED (fn);
		}
	    }

	  /* Local classes in templates have CLASSTYPE_USE_TEMPLATE set,
	     but have no TEMPLATE_INFO, so don't try to check it.  */
	  template_decl = NULL_TREE;
	}
      else if (VAR_P (decl) && DECL_TINFO_P (decl)
	       && flag_visibility_ms_compat)
	{
	  /* Under -fvisibility-ms-compat, types are visible by default,
	     even though their contents aren't.  */
	  tree underlying_type = TREE_TYPE (DECL_NAME (decl));
	  int underlying_vis = type_visibility (underlying_type);
	  if (underlying_vis == VISIBILITY_ANON
	      || (CLASS_TYPE_P (underlying_type)
		  && CLASSTYPE_VISIBILITY_SPECIFIED (underlying_type)))
	    constrain_visibility (decl, underlying_vis, false);
	  else
	    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
	}
      else if (VAR_P (decl) && DECL_TINFO_P (decl))
	{
	  /* tinfo visibility is based on the type it's for.  */
	  constrain_visibility
	    (decl, type_visibility (TREE_TYPE (DECL_NAME (decl))), false);

	  /* Give the target a chance to override the visibility associated
	     with DECL.  */
	  if (TREE_PUBLIC (decl)
	      && !DECL_REALLY_EXTERN (decl)
	      && CLASS_TYPE_P (TREE_TYPE (DECL_NAME (decl)))
	      && !CLASSTYPE_VISIBILITY_SPECIFIED (TREE_TYPE (DECL_NAME (decl))))
	    targetm.cxx.determine_class_data_visibility (decl);
	}
      else if (template_decl)
	/* Template instantiations and specializations get visibility based
	   on their template unless they override it with an attribute.  */;
      else if (! DECL_VISIBILITY_SPECIFIED (decl))
	{
	  if (determine_hidden_inline (decl))
	    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	  else
	    {
	      /* Set default visibility to whatever the user supplied with
		 #pragma GCC visibility or a namespace visibility attribute.  */
	      DECL_VISIBILITY (decl) = default_visibility;
	      DECL_VISIBILITY_SPECIFIED (decl) = visibility_options.inpragma;
	    }
	}
    }

  if (template_decl)
    {
      /* If the specialization doesn't specify visibility, use the
	 visibility from the template.  */
      tree tinfo = get_template_info (template_decl);
      tree args = TI_ARGS (tinfo);
      tree attribs = (TREE_CODE (decl) == TYPE_DECL
		      ? TYPE_ATTRIBUTES (TREE_TYPE (decl))
		      : DECL_ATTRIBUTES (decl));
      tree attr = lookup_attribute ("visibility", attribs);

      if (args != error_mark_node)
	{
	  tree pattern = DECL_TEMPLATE_RESULT (TI_TEMPLATE (tinfo));

	  if (!DECL_VISIBILITY_SPECIFIED (decl))
	    {
	      if (!attr && determine_hidden_inline (decl))
		DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
	      else
		{
		  /* Inherit the template pattern's visibility.  */
		  DECL_VISIBILITY (decl) = DECL_VISIBILITY (pattern);
		  DECL_VISIBILITY_SPECIFIED (decl)
		    = DECL_VISIBILITY_SPECIFIED (pattern);
		}
	    }

	  if (args
	      /* Template argument visibility outweighs #pragma or namespace
		 visibility, but not an explicit attribute.  */
	      && !attr)
	    {
	      int depth = TMPL_ARGS_DEPTH (args);
	      if (DECL_VISIBILITY_SPECIFIED (decl))
		{
		  /* A class template member with explicit visibility
		     overrides the class visibility, so we need to apply
		     all the levels of template args directly.  */
		  int i;
		  for (i = 1; i <= depth; ++i)
		    {
		      tree lev = TMPL_ARGS_LEVEL (args, i);
		      constrain_visibility_for_template (decl, lev);
		    }
		}
	      else if (PRIMARY_TEMPLATE_P (TI_TEMPLATE (tinfo)))
		/* Limit visibility based on its template arguments.  */
		constrain_visibility_for_template (decl, args);
	    }
	}
    }

  if (class_type)
    determine_visibility_from_class (decl, class_type);

  if (decl_anon_ns_mem_p (decl))
    /* Names in an anonymous namespace get internal linkage.
       This might change once we implement export.  */
    constrain_visibility (decl, VISIBILITY_ANON, false);
  else if (TREE_CODE (decl) != TYPE_DECL)
    {
      /* Propagate anonymity from type to decl.  */
      int tvis = type_visibility (TREE_TYPE (decl));
      if (tvis == VISIBILITY_ANON
	  || ! DECL_VISIBILITY_SPECIFIED (decl))
	constrain_visibility (decl, tvis, false);
    }
  else if (no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/true))
    /* DR 757: A type without linkage shall not be used as the type of a
       variable or function with linkage, unless
       o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
       o the variable or function is not used (3.2 [basic.def.odr]) or is
       defined in the same translation unit.

       Since non-extern "C" decls need to be defined in the same
       translation unit, we can make the type internal.  */
    constrain_visibility (decl, VISIBILITY_ANON, false);

  /* If visibility changed and DECL already has DECL_RTL, ensure
     symbol flags are updated.  */
  if ((DECL_VISIBILITY (decl) != orig_visibility
       || DECL_VISIBILITY_SPECIFIED (decl) != orig_visibility_specified)
      && ((VAR_P (decl) && TREE_STATIC (decl))
	  || TREE_CODE (decl) == FUNCTION_DECL)
      && DECL_RTL_SET_P (decl))
    make_decl_rtl (decl);
}

/* By default, static data members and function members receive
   the visibility of their containing class.  */

static void
determine_visibility_from_class (tree decl, tree class_type)
{
  /* An explicit visibility on the member always wins.  */
  if (DECL_VISIBILITY_SPECIFIED (decl))
    return;

  if (determine_hidden_inline (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    {
      /* Default to the class visibility.  */
      DECL_VISIBILITY (decl) = CLASSTYPE_VISIBILITY (class_type);
      DECL_VISIBILITY_SPECIFIED (decl)
	= CLASSTYPE_VISIBILITY_SPECIFIED (class_type);
    }

  /* Give the target a chance to override the visibility associated
     with DECL.  */
  if (VAR_P (decl)
      && TREE_PUBLIC (decl)
      && (DECL_TINFO_P (decl) || DECL_VTABLE_OR_VTT_P (decl))
      && !DECL_REALLY_EXTERN (decl)
      && !CLASSTYPE_VISIBILITY_SPECIFIED (class_type))
    targetm.cxx.determine_class_data_visibility (decl);
}

/* Returns true iff DECL is an inline that should get hidden visibility
   because of -fvisibility-inlines-hidden.  */

static bool
determine_hidden_inline (tree decl)
{
  return (visibility_options.inlines_hidden
	  /* Don't do this for inline templates; specializations might not be
	     inline, and we don't want them to inherit the hidden
	     visibility.  We'll set it here for all inline instantiations.  */
	  && !processing_template_decl
	  && TREE_CODE (decl) == FUNCTION_DECL
	  && DECL_DECLARED_INLINE_P (decl)
	  && (! DECL_LANG_SPECIFIC (decl)
	      || ! DECL_EXPLICIT_INSTANTIATION (decl)));
}

/* Constrain the visibility of a class TYPE based on the visibility of its
   field types.  Warn if any fields require lesser visibility.  */

void
constrain_class_visibility (tree type)
{
  tree binfo;
  tree t;
  int i;

  int vis = type_visibility (type);

  if (vis == VISIBILITY_ANON
      || DECL_IN_SYSTEM_HEADER (TYPE_MAIN_DECL (type)))
    return;

  /* Don't warn about visibility if the class has explicit visibility.  */
  if (CLASSTYPE_VISIBILITY_SPECIFIED (type))
    vis = VISIBILITY_INTERNAL;

  /* Check every non-artificial field against the class's own
     visibility.  */
  for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
    if (TREE_CODE (t) == FIELD_DECL && TREE_TYPE (t) != error_mark_node
	&& !DECL_ARTIFICIAL (t))
      {
	tree ftype = strip_pointer_or_array_types (TREE_TYPE (t));
	int subvis = type_visibility (ftype);

	if (subvis == VISIBILITY_ANON)
	  {
	    if (!in_main_input_context ())
	      {
		tree nlt = no_linkage_check (ftype, /*relaxed_p=*/false);
		if (nlt)
		  {
		    if (same_type_p (TREE_TYPE (t), nlt))
		      warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type has no linkage",
			       type, t);
		    else
		      warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type depends on the type %qT which has no linkage",
			       type, t, nlt);
		  }
		else
		  warning (OPT_Wsubobject_linkage, "\
%qT has a field %qD whose type uses the anonymous namespace",
			   type, t);
	      }
	  }
	else if (MAYBE_CLASS_TYPE_P (ftype)
		 && vis < VISIBILITY_HIDDEN && subvis >= VISIBILITY_HIDDEN)
	  warning (OPT_Wattributes, "\
%qT declared with greater visibility than the type of its field %qD",
		   type, t);
      }

  /* Likewise check every direct base.  */
  binfo = TYPE_BINFO (type);
  for (i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
    {
      int subvis = type_visibility (TREE_TYPE (t));

      if (subvis == VISIBILITY_ANON)
	{
	  if (!in_main_input_context())
	    {
	      tree nlt = no_linkage_check (TREE_TYPE (t), /*relaxed_p=*/false);
	      if (nlt)
		{
		  if (same_type_p (TREE_TYPE (t), nlt))
		    warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type has no linkage",
			     type, TREE_TYPE (t));
		  else
		    warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type depends on the type %qT which has no linkage",
			     type, TREE_TYPE (t), nlt);
		}
	      else
		warning (OPT_Wsubobject_linkage, "\
%qT has a base %qT whose type uses the anonymous namespace",
			 type, TREE_TYPE (t));
	    }
	}
      else if (vis < VISIBILITY_HIDDEN
	       && subvis >= VISIBILITY_HIDDEN)
	warning (OPT_Wattributes, "\
%qT declared with greater visibility than its base %qT",
		 type, TREE_TYPE (t));
    }
}

/* Functions for adjusting the visibility of a tagged type and its nested
   types and
   declarations when it gets a name for linkage purposes from a
   typedef.  */

static void bt_reset_linkage_1 (binding_entry, void *);
static void bt_reset_linkage_2 (binding_entry, void *);

/* First reset the visibility of all the types.  */

static void
reset_type_linkage_1 (tree type)
{
  set_linkage_according_to_type (type, TYPE_MAIN_DECL (type));
  /* Recurse into nested tagged types.  */
  if (CLASS_TYPE_P (type))
    binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
			   bt_reset_linkage_1, NULL);
}

/* binding_table_foreach callback wrapping reset_type_linkage_1.  */

static void
bt_reset_linkage_1 (binding_entry b, void */*data*/)
{
  reset_type_linkage_1 (b->type);
}

/* Then reset the visibility of any static data members or member
   functions that use those types.  */

static void
reset_decl_linkage (tree decl)
{
  if (TREE_PUBLIC (decl))
    return;
  if (DECL_CLONED_FUNCTION_P (decl))
    return;
  /* Mark the linkage as undecided again and recompute it from
     scratch now that the type has a linkage name.  */
  TREE_PUBLIC (decl) = true;
  DECL_INTERFACE_KNOWN (decl) = false;
  determine_visibility (decl);
  tentative_decl_linkage (decl);
}

/* Second pass: re-mangle and reset the linkage of TYPE's vtable,
   typeinfo variable, members, and nested types.  */

static void
reset_type_linkage_2 (tree type)
{
  if (CLASS_TYPE_P (type))
    {
      if (tree vt = CLASSTYPE_VTABLES (type))
	{
	  /* The mangled vtable name changes now that the class has a
	     name for linkage purposes.  */
	  tree name = mangle_vtbl_for_type (type);
	  DECL_NAME (vt) = name;
	  SET_DECL_ASSEMBLER_NAME (vt, name);
	  reset_decl_linkage (vt);
	}
      if (tree ti = CLASSTYPE_TYPEINFO_VAR (type))
	{
	  tree name = mangle_typeinfo_for_type (type);
	  DECL_NAME (ti) = name;
	  SET_DECL_ASSEMBLER_NAME (ti, name);
	  TREE_TYPE (name) = type;
	  reset_decl_linkage (ti);
	}
      for (tree m = TYPE_FIELDS (type); m; m = DECL_CHAIN (m))
	{
	  tree mem = STRIP_TEMPLATE (m);
	  if (TREE_CODE (mem) == VAR_DECL || TREE_CODE (mem) == FUNCTION_DECL)
	    reset_decl_linkage (mem);
	}
      binding_table_foreach (CLASSTYPE_NESTED_UTDS (type),
			     bt_reset_linkage_2, NULL);
    }
}

/* binding_table_foreach callback wrapping reset_type_linkage_2.  */

static void
bt_reset_linkage_2 (binding_entry b, void */*data*/)
{
  reset_type_linkage_2 (b->type);
}

/* Entry point: run both passes over TYPE.  */

void
reset_type_linkage (tree type)
{
  reset_type_linkage_1 (type);
  reset_type_linkage_2 (type);
}

/* Set up our initial idea of what the linkage of DECL should be.
*/

void
tentative_decl_linkage (tree decl)
{
  if (DECL_INTERFACE_KNOWN (decl))
    /* We've already made a decision as to how this function will be
       handled.  */;
  else if (vague_linkage_p (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL
	  && decl_defined_p (decl))
	{
	  DECL_EXTERNAL (decl) = 1;
	  DECL_NOT_REALLY_EXTERN (decl) = 1;
	  note_vague_linkage_fn (decl);
	  /* A non-template inline function with external linkage will
	     always be COMDAT.  As we must eventually determine the
	     linkage of all functions, and as that causes writes to
	     the data mapped in from the PCH file, it's advantageous
	     to mark the functions at this point.  */
	  if (DECL_DECLARED_INLINE_P (decl)
	      && (!DECL_IMPLICIT_INSTANTIATION (decl)
		  || DECL_DEFAULTED_FN (decl)))
	    {
	      /* This function must have external linkage, as
		 otherwise DECL_INTERFACE_KNOWN would have been
		 set.  */
	      gcc_assert (TREE_PUBLIC (decl));
	      comdat_linkage (decl);
	      DECL_INTERFACE_KNOWN (decl) = 1;
	    }
	}
      else if (VAR_P (decl))
	maybe_commonize_var (decl);
    }
}

/* DECL is a FUNCTION_DECL or VAR_DECL.  If the object file linkage
   for DECL has not already been determined, do so now by setting
   DECL_EXTERNAL, DECL_COMDAT and other related flags.  Until this
   function is called entities with vague linkage whose definitions
   are available must have TREE_PUBLIC set.

   If this function decides to place DECL in COMDAT, it will set
   appropriate flags -- but will not clear DECL_EXTERNAL.  It is up to
   the caller to decide whether or not to clear DECL_EXTERNAL.  Some
   callers defer that decision until it is clear that DECL is actually
   required.  */

void
import_export_decl (tree decl)
{
  bool comdat_p;
  bool import_p;
  tree class_type = NULL_TREE;

  if (DECL_INTERFACE_KNOWN (decl))
    return;

  /* We cannot determine what linkage to give to an entity with vague
     linkage until the end of the file.  For example, a virtual table
     for a class will be defined if and only if the key method is
     defined in this translation unit.  */
  gcc_assert (at_eof);
  /* Object file linkage for explicit instantiations is handled in
     mark_decl_instantiated.  For static variables in functions with
     vague linkage, maybe_commonize_var is used.

     Therefore, the only declarations that should be provided to this
     function are those with external linkage that are:

     * implicit instantiations of function templates

     * inline function

     * implicit instantiations of static data members of class
       templates

     * virtual tables

     * typeinfo objects

     Furthermore, all entities that reach this point must have a
     definition available in this translation unit.

     The following assertions check these conditions.  */
  gcc_assert (VAR_OR_FUNCTION_DECL_P (decl));
  /* Any code that creates entities with TREE_PUBLIC cleared should
     also set DECL_INTERFACE_KNOWN.  */
  gcc_assert (TREE_PUBLIC (decl));
  if (TREE_CODE (decl) == FUNCTION_DECL)
    gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
		|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
		|| DECL_DECLARED_INLINE_P (decl));
  else
    gcc_assert (DECL_IMPLICIT_INSTANTIATION (decl)
		|| DECL_VTABLE_OR_VTT_P (decl)
		|| DECL_TINFO_P (decl));
  /* Check that a definition of DECL is available in this translation
     unit.  */
  gcc_assert (!DECL_REALLY_EXTERN (decl));

  /* Assume that DECL will not have COMDAT linkage.  */
  comdat_p = false;
  /* Assume that DECL will not be imported into this translation
     unit.  */
  import_p = false;

  if (VAR_P (decl) && DECL_VTABLE_OR_VTT_P (decl))
    {
      class_type = DECL_CONTEXT (decl);
      import_export_class (class_type);
      if (CLASSTYPE_INTERFACE_KNOWN (class_type)
	  && CLASSTYPE_INTERFACE_ONLY (class_type))
	import_p = true;
      else if ((!flag_weak || TARGET_WEAK_NOT_IN_ARCHIVE_TOC)
	       && !CLASSTYPE_USE_TEMPLATE (class_type)
	       && CLASSTYPE_KEY_METHOD (class_type)
	       && !DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type)))
	/* The ABI requires that all virtual tables be emitted with
	   COMDAT linkage.  However, on systems where COMDAT symbols
	   don't show up in the table of contents for a static
	   archive, or on systems without weak symbols (where we
	   approximate COMDAT linkage by using internal linkage), the
	   linker will report errors about undefined symbols because
	   it will not see the virtual table definition.  Therefore,
	   in the case that we know that the virtual table will be
	   emitted in only one translation unit, we make the virtual
	   table an ordinary definition with external linkage.  */
	DECL_EXTERNAL (decl) = 0;
      else if (CLASSTYPE_INTERFACE_KNOWN (class_type))
	{
	  /* CLASS_TYPE is being exported from this translation unit,
	     so DECL should be defined here.  */
	  if (!flag_weak && CLASSTYPE_EXPLICIT_INSTANTIATION (class_type))
	    /* If a class is declared in a header with the "extern
	       template" extension, then it will not be instantiated,
	       even in translation units that would normally require
	       it.  Often such classes are explicitly instantiated in
	       one translation unit.  Therefore, the explicit
	       instantiation must be made visible to other translation
	       units.  */
	    DECL_EXTERNAL (decl) = 0;
	  else
	    {
	      /* The generic C++ ABI says that class data is always
		 COMDAT, even if there is a key function.  Some
		 variants (e.g., the ARM EABI) says that class data
		 only has COMDAT linkage if the class data might be
		 emitted in more than one translation unit.  When the
		 key method can be inline and is inline, we still have
		 to arrange for comdat even though
		 class_data_always_comdat is false.  */
	      if (!CLASSTYPE_KEY_METHOD (class_type)
		  || DECL_DECLARED_INLINE_P (CLASSTYPE_KEY_METHOD (class_type))
		  || targetm.cxx.class_data_always_comdat ())
		{
		  /* The ABI requires COMDAT linkage.  Normally, we
		     only emit COMDAT things when they are needed;
		     make sure that we realize that this entity is
		     indeed needed.  */
		  comdat_p = true;
		  mark_needed (decl);
		}
	    }
	}
      else if (!flag_implicit_templates
	       && CLASSTYPE_IMPLICIT_INSTANTIATION (class_type))
	import_p = true;
      else
	comdat_p = true;
    }
  else if (VAR_P (decl) && DECL_TINFO_P (decl))
    {
      /* For typeinfo objects, DECL_NAME's TREE_TYPE is the type the
	 typeinfo describes.  */
      tree type = TREE_TYPE (DECL_NAME (decl));
      if (CLASS_TYPE_P (type))
	{
	  class_type = type;
	  import_export_class (type);
	  if (CLASSTYPE_INTERFACE_KNOWN (type)
	      && TYPE_POLYMORPHIC_P (type)
	      && CLASSTYPE_INTERFACE_ONLY (type)
	      /* If -fno-rtti was specified, then we cannot be sure
		 that RTTI information will be emitted with the
		 virtual table of the class, so we must emit it
		 wherever it is used.  */
	      && flag_rtti)
	    import_p = true;
	  else
	    {
	      if (CLASSTYPE_INTERFACE_KNOWN (type)
		  && !CLASSTYPE_INTERFACE_ONLY (type))
		{
		  comdat_p = (targetm.cxx.class_data_always_comdat ()
			      || (CLASSTYPE_KEY_METHOD (type)
				  && DECL_DECLARED_INLINE_P
				       (CLASSTYPE_KEY_METHOD (type))));
		  mark_needed (decl);
		  if (!flag_weak)
		    {
		      comdat_p = false;
		      DECL_EXTERNAL (decl) = 0;
		    }
		}
	      else
		comdat_p = true;
	    }
	}
      else
	comdat_p = true;
    }
  else if (DECL_TEMPLOID_INSTANTIATION (decl))
    {
      /* DECL is an implicit instantiation of a function or static
	 data member.  */
      if (flag_implicit_templates
	  || (flag_implicit_inline_templates
	      && TREE_CODE (decl) == FUNCTION_DECL
	      && DECL_DECLARED_INLINE_P (decl)))
	comdat_p = true;
      else
	/* If we are not implicitly generating templates, then mark
	   this entity as undefined in this translation unit.  */
	import_p = true;
    }
  else if (DECL_FUNCTION_MEMBER_P (decl))
    {
      if (!DECL_DECLARED_INLINE_P (decl))
	{
	  tree ctype = DECL_CONTEXT (decl);
	  import_export_class (ctype);
	  if (CLASSTYPE_INTERFACE_KNOWN (ctype))
	    {
	      DECL_NOT_REALLY_EXTERN (decl)
		= ! (CLASSTYPE_INTERFACE_ONLY (ctype)
		     || (DECL_DECLARED_INLINE_P (decl)
			 && ! flag_implement_inlines
			 && !DECL_VINDEX (decl)));

	      if (!DECL_NOT_REALLY_EXTERN (decl))
		DECL_EXTERNAL (decl) = 1;

	      /* Always make artificials weak.  */
	      if (DECL_ARTIFICIAL (decl) && flag_weak)
		comdat_p = true;
	      else
		maybe_make_one_only (decl);
	    }
	}
      else
	comdat_p = true;
    }
  else
    comdat_p = true;

  if (import_p)
    {
      /* If we are importing DECL into this translation unit, mark it
	 as undefined here.  */
      DECL_EXTERNAL (decl) = 1;
      DECL_NOT_REALLY_EXTERN (decl) = 0;
    }
  else if (comdat_p)
    {
      /* If we decided to put DECL in COMDAT, mark it accordingly at
	 this point.  */
      comdat_linkage (decl);
    }

  DECL_INTERFACE_KNOWN (decl) = 1;
}

/* Return an expression that performs the destruction of DECL, which
   must be a VAR_DECL whose type has a non-trivial destructor, or is
   an array whose (innermost) elements have a non-trivial destructor.  */

tree
build_cleanup (tree decl)
{
  tree clean = cxx_maybe_build_cleanup (decl, tf_warning_or_error);
  gcc_assert (clean != NULL_TREE);
  return clean;
}

/* GUARD is a helper variable for DECL; make them have the same
   linkage and visibility.  */

void
copy_linkage (tree guard, tree decl)
{
  TREE_PUBLIC (guard) = TREE_PUBLIC (decl);
  TREE_STATIC (guard) = TREE_STATIC (decl);
  DECL_COMMON (guard) = DECL_COMMON (decl);
  DECL_COMDAT (guard) = DECL_COMDAT (decl);
  if (TREE_STATIC (guard))
    {
      CP_DECL_THREAD_LOCAL_P (guard) = CP_DECL_THREAD_LOCAL_P (decl);
      set_decl_tls_model (guard, DECL_TLS_MODEL (decl));
      if (DECL_ONE_ONLY (decl))
	make_decl_one_only (guard, cxx_comdat_group (guard));
      if (TREE_PUBLIC (decl))
	DECL_WEAK (guard) = DECL_WEAK (decl);
      /* Also check vague_linkage_p, as DECL_WEAK and DECL_ONE_ONLY might not
	 be set until import_export_decl at EOF.  */
      if (vague_linkage_p (decl))
	comdat_linkage (guard);
      DECL_VISIBILITY (guard) = DECL_VISIBILITY (decl);
      DECL_VISIBILITY_SPECIFIED (guard) = DECL_VISIBILITY_SPECIFIED (decl);
    }
}

/* Returns the initialization guard variable for the variable DECL,
   which has static storage duration.  */

tree
get_guard (tree decl)
{
  tree sname;
  tree guard;

  /* Look up the guard by its mangled name first, so repeated calls
     for the same DECL return the same VAR_DECL.  */
  sname = mangle_guard_variable (decl);
  guard = get_global_binding (sname);
  if (!guard)
    {
      tree guard_type;

      /* We use a type that is big enough to contain a mutex as well
	 as an integer counter.  */
      guard_type = targetm.cxx.guard_type ();
      guard = build_decl (DECL_SOURCE_LOCATION (decl),
			  VAR_DECL, sname, guard_type);

      /* The guard should have the same linkage as what it guards.  */
      copy_linkage (guard, decl);

      DECL_ARTIFICIAL (guard) = 1;
      DECL_IGNORED_P (guard) = 1;
      TREE_USED (guard) = 1;
      pushdecl_top_level_and_finish (guard, NULL_TREE);
    }
  return guard;
}

/* Return an atomic load of src with the appropriate memory model.  */

static tree
build_atomic_load_byte (tree src, HOST_WIDE_INT model)
{
  tree ptr_type = build_pointer_type (char_type_node);
  tree mem_model = build_int_cst (integer_type_node, model);
  tree t, addr, val;
  unsigned int size;
  int fncode;

  size = tree_to_uhwi (TYPE_SIZE_UNIT (char_type_node));

  /* Select the __atomic_load_N builtin matching the byte size.  */
  fncode = BUILT_IN_ATOMIC_LOAD_N + exact_log2 (size) + 1;
  t = builtin_decl_implicit ((enum built_in_function) fncode);

  addr = build1 (ADDR_EXPR, ptr_type, src);
  val = build_call_expr (t, 2, addr, mem_model);
  return val;
}

/* Return those bits of the GUARD variable that should be set when the
   guarded entity is actually initialized.  */

static tree
get_guard_bits (tree guard)
{
  if (!targetm.cxx.guard_mask_bit ())
    {
      /* We only set the first byte of the guard, in order to leave room
	 for a mutex in the high-order bits.  */
      guard = build1 (ADDR_EXPR,
		      build_pointer_type (TREE_TYPE (guard)),
		      guard);
      guard = build1 (NOP_EXPR,
		      build_pointer_type (char_type_node),
		      guard);
      guard = build1 (INDIRECT_REF, char_type_node, guard);
    }

  return guard;
}

/* Return an expression which determines whether or not the GUARD
   variable has already been initialized.  */

tree
get_guard_cond (tree guard, bool thread_safe)
{
  tree guard_value;

  if (!thread_safe)
    guard = get_guard_bits (guard);
  else
    /* In the thread-safe case, read the guard byte with acquire
       semantics.  */
    guard = build_atomic_load_byte (guard, MEMMODEL_ACQUIRE);

  /* Mask off all but the low bit.  */
  if (targetm.cxx.guard_mask_bit ())
    {
      guard_value = integer_one_node;
      if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
	guard_value = fold_convert (TREE_TYPE (guard), guard_value);
      guard = cp_build_binary_op (input_location,
				  BIT_AND_EXPR, guard, guard_value,
				  tf_warning_or_error);
    }

  /* The guard needs initialization iff its value is still zero.  */
  guard_value = integer_zero_node;
  if (!same_type_p (TREE_TYPE (guard_value), TREE_TYPE (guard)))
    guard_value = fold_convert (TREE_TYPE (guard), guard_value);
  return cp_build_binary_op (input_location,
			     EQ_EXPR, guard, guard_value,
			     tf_warning_or_error);
}

/* Return an expression which sets the GUARD variable, indicating that
   the variable being guarded has been initialized.  */

tree
set_guard (tree guard)
{
  tree guard_init;

  /* Set the GUARD to one.  */
  guard = get_guard_bits (guard);
  guard_init = integer_one_node;
  if (!same_type_p (TREE_TYPE (guard_init), TREE_TYPE (guard)))
    guard_init = fold_convert (TREE_TYPE (guard), guard_init);
  return cp_build_modify_expr (input_location, guard, NOP_EXPR, guard_init,
			       tf_warning_or_error);
}

/* Returns true iff we can tell that VAR does not have a dynamic
   initializer.  */

static bool
var_defined_without_dynamic_init (tree var)
{
  /* If it's defined in another TU, we can't tell.  */
  if (DECL_EXTERNAL (var))
    return false;
  /* If it has a non-trivial destructor, registering the destructor
     counts as dynamic initialization.  */
  if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (var)))
    return false;
  /* If it's in this TU, its initializer has been processed, unless
     it's a case of self-initialization, then DECL_INITIALIZED_P is
     false while the initializer is handled by finish_id_expression.  */
  if (!DECL_INITIALIZED_P (var))
    return false;
  /* If it has no initializer or a constant one, it's not dynamic.  */
  return (!DECL_NONTRIVIALLY_INITIALIZED_P (var)
	  || DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (var));
}

/* Returns true iff VAR is a variable that needs uses to be wrapped for
   possible dynamic initialization.
*/

static bool
var_needs_tls_wrapper (tree var)
{
  return (!error_operand_p (var)
	  && CP_DECL_THREAD_LOCAL_P (var)
	  && !DECL_GNU_TLS_P (var)
	  && !DECL_FUNCTION_SCOPE_P (var)
	  && !var_defined_without_dynamic_init (var));
}

/* Get the FUNCTION_DECL for the shared TLS init function for this
   translation unit.  */

static tree
get_local_tls_init_fn (location_t loc)
{
  /* The name "__tls_init" is fixed, so all callers in this TU share
     one decl via the global binding.  */
  tree sname = get_identifier ("__tls_init");
  tree fn = get_global_binding (sname);
  if (!fn)
    {
      fn = build_lang_decl_loc (loc, FUNCTION_DECL, sname,
				build_function_type (void_type_node,
						     void_list_node));
      SET_DECL_LANGUAGE (fn, lang_c);
      TREE_PUBLIC (fn) = false;
      DECL_ARTIFICIAL (fn) = true;
      mark_used (fn);
      set_global_binding (fn);
    }
  return fn;
}

/* Get a FUNCTION_DECL for the init function for the thread_local
   variable VAR.  The init function will be an alias to the function
   that initializes all the non-local TLS variables in the translation
   unit.  The init function is only used by the wrapper function.  */

static tree
get_tls_init_fn (tree var)
{
  /* Only C++11 TLS vars need this init fn.  */
  if (!var_needs_tls_wrapper (var))
    return NULL_TREE;

  /* If -fno-extern-tls-init, assume that we don't need to call a tls
     init function for a variable defined in another TU.  */
  if (!flag_extern_tls_init && DECL_EXTERNAL (var))
    return NULL_TREE;

  /* If the variable is internal, or if we can't generate aliases,
     call the local init function directly.  */
  if (!TREE_PUBLIC (var) || !TARGET_SUPPORTS_ALIASES)
    return get_local_tls_init_fn (DECL_SOURCE_LOCATION (var));

  tree sname = mangle_tls_init_fn (var);
  tree fn = get_global_binding (sname);
  if (!fn)
    {
      fn = build_lang_decl (FUNCTION_DECL, sname,
			    build_function_type (void_type_node,
						 void_list_node));
      SET_DECL_LANGUAGE (fn, lang_c);
      /* The init fn mirrors the linkage and visibility of VAR itself.  */
      TREE_PUBLIC (fn) = TREE_PUBLIC (var);
      DECL_ARTIFICIAL (fn) = true;
      DECL_COMDAT (fn) = DECL_COMDAT (var);
      DECL_EXTERNAL (fn) = DECL_EXTERNAL (var);
      if (DECL_ONE_ONLY (var))
	make_decl_one_only (fn, cxx_comdat_group (fn));
      if (TREE_PUBLIC (var))
	{
	  tree obtype = strip_array_types (non_reference (TREE_TYPE (var)));
	  /* If the variable is defined somewhere else and might have static
	     initialization, make the init function a weak reference.  */
	  if ((!TYPE_NEEDS_CONSTRUCTING (obtype)
	       || TYPE_HAS_CONSTEXPR_CTOR (obtype)
	       || TYPE_HAS_TRIVIAL_DFLT (obtype))
	      && TYPE_HAS_TRIVIAL_DESTRUCTOR (obtype)
	      && DECL_EXTERNAL (var))
	    declare_weak (fn);
	  else
	    DECL_WEAK (fn) = DECL_WEAK (var);
	}
      DECL_VISIBILITY (fn) = DECL_VISIBILITY (var);
      DECL_VISIBILITY_SPECIFIED (fn) = DECL_VISIBILITY_SPECIFIED (var);
      DECL_DLLIMPORT_P (fn) = DECL_DLLIMPORT_P (var);
      DECL_IGNORED_P (fn) = 1;
      mark_used (fn);

      /* Stash VAR on the fn so generate_tls_wrapper can find it.  */
      DECL_BEFRIENDING_CLASSES (fn) = var;

      set_global_binding (fn);
    }
  return fn;
}

/* Get a FUNCTION_DECL for the init wrapper function for the thread_local
   variable VAR.  The wrapper function calls the init function (if any) for
   VAR and then returns a reference to VAR.  The wrapper function is used
   in place of VAR everywhere VAR is mentioned.  */

static tree
get_tls_wrapper_fn (tree var)
{
  /* Only C++11 TLS vars need this wrapper fn.  */
  if (!var_needs_tls_wrapper (var))
    return NULL_TREE;

  tree sname = mangle_tls_wrapper_fn (var);
  tree fn = get_global_binding (sname);
  if (!fn)
    {
      /* A named rvalue reference is an lvalue, so the wrapper should
	 always return an lvalue reference.  */
      tree type = non_reference (TREE_TYPE (var));
      type = build_reference_type (type);
      tree fntype = build_function_type (type, void_list_node);

      fn = build_lang_decl_loc (DECL_SOURCE_LOCATION (var),
				FUNCTION_DECL, sname, fntype);
      SET_DECL_LANGUAGE (fn, lang_c);
      TREE_PUBLIC (fn) = TREE_PUBLIC (var);
      DECL_ARTIFICIAL (fn) = true;
      DECL_IGNORED_P (fn) = 1;
      /* The wrapper is inline and emitted everywhere var is used.  */
      DECL_DECLARED_INLINE_P (fn) = true;
      if (TREE_PUBLIC (var))
	{
	  comdat_linkage (fn);
#ifdef HAVE_GAS_HIDDEN
	  /* Make the wrapper bind locally; there's no reason to share
	     the wrapper between multiple shared objects.  */
	  DECL_VISIBILITY (fn) = VISIBILITY_INTERNAL;
	  DECL_VISIBILITY_SPECIFIED (fn) = true;
#endif
	}
      if (!TREE_PUBLIC (fn))
	DECL_INTERFACE_KNOWN (fn) = true;
      mark_used (fn);
      note_vague_linkage_fn (fn);

#if 0
      /* We want CSE to commonize calls to the wrapper, but marking it as
	 pure is unsafe since it has side-effects.  I guess we need a new
	 ECF flag even weaker than ECF_PURE.  FIXME!  */
      DECL_PURE_P (fn) = true;
#endif

      /* Stash VAR on the fn so generate_tls_wrapper can find it.  */
      DECL_BEFRIENDING_CLASSES (fn) = var;

      set_global_binding (fn);
    }
  return fn;
}

/* If EXPR is a thread_local variable that should be wrapped by init
   wrapper function, return a call to that function, otherwise return
   NULL.  */

tree
maybe_get_tls_wrapper_call (tree expr)
{
  if (VAR_P (expr)
      && !processing_template_decl
      && !cp_unevaluated_operand
      && CP_DECL_THREAD_LOCAL_P (expr))
    if (tree wrap = get_tls_wrapper_fn (expr))
      return build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
  return NULL;
}

/* At EOF, generate the definition for the TLS wrapper function FN:

   T& var_wrapper() {
     if (init_fn) init_fn();
     return var;
   }  */

static void
generate_tls_wrapper (tree fn)
{
  /* The wrapped variable was stashed here by get_tls_wrapper_fn.  */
  tree var = DECL_BEFRIENDING_CLASSES (fn);

  start_preparsed_function (fn, NULL_TREE, SF_DEFAULT | SF_PRE_PARSED);
  tree body = begin_function_body ();
  /* Only call the init fn if there might be one.  */
  if (tree init_fn = get_tls_init_fn (var))
    {
      tree if_stmt = NULL_TREE;
      /* If init_fn is a weakref, make sure it exists before calling.  */
      if (lookup_attribute ("weak", DECL_ATTRIBUTES (init_fn)))
	{
	  if_stmt = begin_if_stmt ();
	  tree addr = cp_build_addr_expr (init_fn, tf_warning_or_error);
	  tree cond = cp_build_binary_op (DECL_SOURCE_LOCATION (var),
					  NE_EXPR, addr, nullptr_node,
					  tf_warning_or_error);
	  finish_if_stmt_cond (cond, if_stmt);
	}
      finish_expr_stmt (build_cxx_call
			(init_fn, 0, NULL, tf_warning_or_error));
      if (if_stmt)
	{
	  finish_then_clause (if_stmt);
	  finish_if_stmt (if_stmt);
	}
    }
  else
    /* If there's no initialization, the wrapper is a constant function.  */
    TREE_READONLY (fn) = true;
  finish_return_stmt (convert_from_reference (var));
  finish_function_body (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}

/* Start the process of running a particular set of global constructors
   or destructors.  Subroutine of do_[cd]tors.  Also called from
   vtv_start_verification_constructor_init_function.  */

static tree
start_objects (int method_type, int initp)
{
  tree body;
  tree fndecl;
  char type[14];

  /* Make ctor or dtor function.  METHOD_TYPE may be 'I' or 'D'.  */
  if (initp != DEFAULT_INIT_PRIORITY)
    {
      char joiner;

#ifdef JOINER
      joiner = JOINER;
#else
      joiner = '_';
#endif

      /* Encode the priority in the function name so the collector can
	 order the calls.  */
      sprintf (type, "sub_%c%c%.5u", method_type, joiner, initp);
    }
  else
    sprintf (type, "sub_%c", method_type);

  fndecl = build_lang_decl (FUNCTION_DECL,
			    get_file_function_name (type),
			    build_function_type_list (void_type_node,
						      NULL_TREE));
  start_preparsed_function (fndecl, /*attrs=*/NULL_TREE, SF_PRE_PARSED);

  TREE_PUBLIC (current_function_decl) = 0;

  /* Mark as artificial because it's not explicitly in the user's
     source code.  */
  DECL_ARTIFICIAL (current_function_decl) = 1;

  /* Mark this declaration as used to avoid spurious warnings.  */
  TREE_USED (current_function_decl) = 1;

  /* Mark this function as a global constructor or destructor.  */
  if (method_type == 'I')
    DECL_GLOBAL_CTOR_P (current_function_decl) = 1;
  else
    DECL_GLOBAL_DTOR_P (current_function_decl) = 1;

  body = begin_compound_stmt (BCS_FN_BODY);

  return body;
}

/* Finish the process of running a particular set of global constructors
   or destructors.  Subroutine of do_[cd]tors.  */

static void
finish_objects (int method_type, int initp, tree body)
{
  tree fn;

  /* Finish up.  */
  finish_compound_stmt (body);
  fn = finish_function (/*inline_p=*/false);

  if (method_type == 'I')
    {
      DECL_STATIC_CONSTRUCTOR (fn) = 1;
      decl_init_priority_insert (fn, initp);
    }
  else
    {
      DECL_STATIC_DESTRUCTOR (fn) = 1;
      decl_fini_priority_insert (fn, initp);
    }

  expand_or_defer_fn (fn);
}

/* The names of the parameters to the function created to handle
   initializations and destructions for objects with static storage
   duration.  */
#define INITIALIZE_P_IDENTIFIER "__initialize_p"
#define PRIORITY_IDENTIFIER "__priority"

/* The name of the function we create to handle initializations and
   destructions for objects with static storage duration.  */
#define SSDF_IDENTIFIER "__static_initialization_and_destruction"

/* The declaration for the __INITIALIZE_P argument.  */
static GTY(()) tree initialize_p_decl;

/* The declaration for the __PRIORITY argument.  */
static GTY(()) tree priority_decl;

/* The declaration for the static storage duration function.  */
static GTY(()) tree ssdf_decl;

/* All the static storage duration functions created in this
   translation unit.  */
static GTY(()) vec<tree, va_gc> *ssdf_decls;

/* A map from priority levels to information about that priority
   level.  There may be many such levels, so efficient lookup is
   important.  */
static splay_tree priority_info_map;

/* Begins the generation of the function that will handle all
   initialization and destruction of objects with static storage
   duration.  The function generated takes two parameters of type
   `int': __INITIALIZE_P and __PRIORITY.  If __INITIALIZE_P is
   nonzero, it performs initializations.
   Otherwise, it performs destructions.  It only performs those
   initializations or destructions with the indicated __PRIORITY.  The
   generated function returns no value.  It is assumed that this
   function will only be called once per translation unit.  */

static tree
start_static_storage_duration_function (unsigned count)
{
  tree type;
  tree body;
  /* Buffer for the generated name: the fixed identifier, '_', and up
     to 32 digits of COUNT (far more than a 32-bit unsigned needs).  */
  char id[sizeof (SSDF_IDENTIFIER) + 1 /* '\0' */ + 32];

  /* Create the identifier for this function.  It will be of the form
     SSDF_IDENTIFIER_<number>.  */
  sprintf (id, "%s_%u", SSDF_IDENTIFIER, count);

  type = build_function_type_list (void_type_node,
				   integer_type_node, integer_type_node,
				   NULL_TREE);

  /* Create the FUNCTION_DECL itself.  Note: SSDF_DECL is a file-scope
     GC root; this function leaves it pointing at the new decl.  */
  ssdf_decl = build_lang_decl (FUNCTION_DECL,
			       get_identifier (id),
			       type);
  TREE_PUBLIC (ssdf_decl) = 0;
  DECL_ARTIFICIAL (ssdf_decl) = 1;

  /* Put this function in the list of functions to be called from the
     static constructors and destructors.  */
  if (!ssdf_decls)
    {
      vec_alloc (ssdf_decls, 32);

      /* Take this opportunity to initialize the map from priority
	 numbers to information about that priority level.  */
      priority_info_map = splay_tree_new (splay_tree_compare_ints,
					  /*delete_key_fn=*/0,
					  /*delete_value_fn=*/
					  splay_tree_delete_pointers);

      /* We always need to generate functions for the
	 DEFAULT_INIT_PRIORITY so enter it now.  That way when we walk
	 priorities later, we'll be sure to find the
	 DEFAULT_INIT_PRIORITY.  */
      get_priority_info (DEFAULT_INIT_PRIORITY);
    }

  vec_safe_push (ssdf_decls, ssdf_decl);

  /* Create the argument list: two 'int' parameters, __initialize_p
     and __priority, chained together and attached to the decl.  */
  initialize_p_decl = cp_build_parm_decl
    (ssdf_decl, get_identifier (INITIALIZE_P_IDENTIFIER), integer_type_node);
  TREE_USED (initialize_p_decl) = 1;
  priority_decl = cp_build_parm_decl
    (ssdf_decl, get_identifier (PRIORITY_IDENTIFIER), integer_type_node);
  TREE_USED (priority_decl) = 1;

  DECL_CHAIN (initialize_p_decl) = priority_decl;
  DECL_ARGUMENTS (ssdf_decl) = initialize_p_decl;

  /* Put the function in the global scope.  */
  pushdecl (ssdf_decl);

  /* Start the function itself.  This is equivalent to declaring the
     function as:

       static void __ssdf (int __initialize_p, int __priority);

     It is static because we only need to call this function from the
     various constructor and destructor functions for this module.  */
  start_preparsed_function (ssdf_decl,
			    /*attrs=*/NULL_TREE,
			    SF_PRE_PARSED);

  /* Set up the scope of the outermost block in the function.  */
  body = begin_compound_stmt (BCS_FN_BODY);

  return body;
}

/* Finish the generation of the function which performs initialization
   and destruction of objects with static storage duration.  After
   this point, no more such objects can be created.  BODY is the
   compound statement returned by
   start_static_storage_duration_function.  */

static void
finish_static_storage_duration_function (tree body)
{
  /* Close out the function.  */
  finish_compound_stmt (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}

/* Return the information about the indicated PRIORITY level.  If no
   code to handle this level has yet been generated, generate the
   appropriate prologue.  The returned record is owned by
   PRIORITY_INFO_MAP and freed when the map is deleted.  */

static priority_info
get_priority_info (int priority)
{
  priority_info pi;
  splay_tree_node n;

  n = splay_tree_lookup (priority_info_map,
			 (splay_tree_key) priority);
  if (!n)
    {
      /* Create a new priority information structure, and insert it
	 into the map.  Both flags start clear; callers set them as
	 initializations/destructions are emitted at this priority.  */
      pi = XNEW (struct priority_info_s);
      pi->initializations_p = 0;
      pi->destructions_p = 0;
      splay_tree_insert (priority_info_map,
			 (splay_tree_key) priority,
			 (splay_tree_value) pi);
    }
  else
    pi = (priority_info) n->value;

  return pi;
}

/* The effective initialization priority of a DECL.  A priority of
   zero, or no explicit priority at all, maps to
   DEFAULT_INIT_PRIORITY.  */

#define DECL_EFFECTIVE_INIT_PRIORITY(decl)				      \
	((!DECL_HAS_INIT_PRIORITY_P (decl) || DECL_INIT_PRIORITY (decl) == 0) \
	 ? DEFAULT_INIT_PRIORITY : DECL_INIT_PRIORITY (decl))

/* Whether a DECL needs a guard to protect it against multiple
   initialization.  Only decls with external linkage that may be
   emitted in several translation units (common/comdat/weak) need
   one.  */

#define NEEDS_GUARD_P(decl) (TREE_PUBLIC (decl) && (DECL_COMMON (decl) \
						    || DECL_ONE_ONLY (decl) \
						    || DECL_WEAK (decl)))

/* Called from one_static_initialization_or_destruction(),
   via walk_tree.
   Walks the initializer list of a global variable and looks for
   temporary variables (DECL_NAME() == NULL and DECL_ARTIFICIAL != 0)
   and that have their DECL_CONTEXT() == NULL.  For each such
   temporary variable, set their DECL_CONTEXT() to the current
   function.  This is necessary because otherwise some optimizers
   (enabled by -O2 -fprofile-arcs) might crash when trying to refer
   to a temporary variable that does not have its DECL_CONTEXT()
   properly set.  */

static tree
fix_temporary_vars_context_r (tree *node,
			      int * /*unused*/,
			      void * /*unused1*/)
{
  gcc_assert (current_function_decl);

  if (TREE_CODE (*node) == BIND_EXPR)
    {
      tree var;

      for (var = BIND_EXPR_VARS (*node); var; var = DECL_CHAIN (var))
	if (VAR_P (var)
	    && !DECL_NAME (var)
	    && DECL_ARTIFICIAL (var)
	    && !DECL_CONTEXT (var))
	  DECL_CONTEXT (var) = current_function_decl;
    }

  return NULL_TREE;
}

/* Set up to handle the initialization or destruction of DECL.  If
   INITP is nonzero, we are initializing the variable.  Otherwise, we
   are destroying it.  INIT is the (possibly NULL) initialization
   expression; statements are emitted into the function currently
   being built.  */

static void
one_static_initialization_or_destruction (tree decl, tree init, bool initp)
{
  tree guard_if_stmt = NULL_TREE;
  tree guard;

  /* If we are supposed to destruct and there's a trivial destructor,
     nothing has to be done.  */
  if (!initp && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
    return;

  /* Trick the compiler into thinking we are at the file and line
     where DECL was declared so that error-messages make sense, and so
     that the debugger will show somewhat sensible file and line
     information.  */
  input_location = DECL_SOURCE_LOCATION (decl);

  /* Make sure temporary variables in the initialiser all have
     their DECL_CONTEXT() set to a value different from NULL_TREE.
     This can happen when global variables initializers are built.
     In that case, the DECL_CONTEXT() of the global variables _AND_ of all
     the temporary variables that might have been generated in the
     accompanying initializers is NULL_TREE, meaning the variables have been
     declared in the global namespace.
     What we want to do here is to fix that and make sure the
     DECL_CONTEXT() of the temporaries are set to the current function
     decl.  */
  cp_walk_tree_without_duplicates (&init,
				   fix_temporary_vars_context_r,
				   NULL);

  /* Because of:

       [class.access.spec]

       Access control for implicit calls to the constructors,
       the conversion functions, or the destructor called to
       create and destroy a static data member is performed as
       if these calls appeared in the scope of the member's
       class.

     we pretend we are in a static member function of the class of
     which the DECL is a member.  (Undone at the end of this
     function.)  */
  if (member_p (decl))
    {
      DECL_CONTEXT (current_function_decl) = DECL_CONTEXT (decl);
      DECL_STATIC_FUNCTION_P (current_function_decl) = 1;
    }

  /* Assume we don't need a guard.  */
  guard = NULL_TREE;
  /* We need a guard if this is an object with external linkage that
     might be initialized in more than one place.  (For example, a
     static data member of a template, when the data member requires
     construction.)  */
  if (NEEDS_GUARD_P (decl))
    {
      tree guard_cond;

      guard = get_guard (decl);

      /* When using __cxa_atexit, we just check the GUARD as we would
	 for a local static.  */
      if (flag_use_cxa_atexit)
	{
	  /* When using __cxa_atexit, we never try to destroy
	     anything from a static destructor.  */
	  gcc_assert (initp);
	  guard_cond = get_guard_cond (guard, false);
	}
      /* If we don't have __cxa_atexit, then we will be running
	 destructors from .fini sections, or their equivalents.  So,
	 we need to know how many times we've tried to initialize this
	 object.  We do initializations only if the GUARD is zero,
	 i.e., if we are the first to initialize the variable.  We do
	 destructions only if the GUARD is one, i.e., if we are the
	 last to destroy the variable.  The guard is pre-incremented
	 (resp. pre-decremented) so the same counter serves both
	 directions.  */
      else if (initp)
	guard_cond
	  = cp_build_binary_op (input_location,
				EQ_EXPR,
				cp_build_unary_op (PREINCREMENT_EXPR,
						   guard,
						   /*noconvert=*/true,
						   tf_warning_or_error),
				integer_one_node,
				tf_warning_or_error);
      else
	guard_cond
	  = cp_build_binary_op (input_location,
				EQ_EXPR,
				cp_build_unary_op (PREDECREMENT_EXPR,
						   guard,
						   /*noconvert=*/true,
						   tf_warning_or_error),
				integer_zero_node,
				tf_warning_or_error);

      guard_if_stmt = begin_if_stmt ();
      finish_if_stmt_cond (guard_cond, guard_if_stmt);
    }

  /* If we're using __cxa_atexit, we have not already set the GUARD,
     so we must do so now.  */
  if (guard && initp && flag_use_cxa_atexit)
    finish_expr_stmt (set_guard (guard));

  /* Perform the initialization or destruction.  */
  if (initp)
    {
      if (init)
	{
	  finish_expr_stmt (init);
	  if (sanitize_flags_p (SANITIZE_ADDRESS, decl))
	    {
	      varpool_node *vnode = varpool_node::get (decl);
	      if (vnode)
		vnode->dynamically_initialized = 1;
	    }
	}

      /* If we're using __cxa_atexit, register a function that calls the
	 destructor for the object.  */
      if (flag_use_cxa_atexit)
	finish_expr_stmt (register_dtor_fn (decl));
    }
  else
    finish_expr_stmt (build_cleanup (decl));

  /* Finish the guard if-stmt, if necessary.  */
  if (guard)
    {
      finish_then_clause (guard_if_stmt);
      finish_if_stmt (guard_if_stmt);
    }

  /* Now that we're done with DECL we don't need to pretend to be a
     member of its class any longer.  */
  DECL_CONTEXT (current_function_decl) = NULL_TREE;
  DECL_STATIC_FUNCTION_P (current_function_decl) = 0;
}

/* Generate code to do the initialization or destruction of the decls in VARS,
   a TREE_LIST of VAR_DECL with static storage duration.
   Whether initialization or destruction is performed is specified by INITP.
   The generated code runs inside the current SSDF and is wrapped in
   checks of the __initialize_p and __priority parameters.  */

static void
do_static_initialization_or_destruction (tree vars, bool initp)
{
  tree node, init_if_stmt, cond;

  /* Build the outer if-stmt to check for initialization or
     destruction.  */
  init_if_stmt = begin_if_stmt ();
  cond = initp ? integer_one_node : integer_zero_node;
  cond = cp_build_binary_op (input_location,
			     EQ_EXPR,
			     initialize_p_decl,
			     cond,
			     tf_warning_or_error);
  finish_if_stmt_cond (cond, init_if_stmt);

  /* To make sure dynamic construction doesn't access globals from other
     compilation units where they might not be yet constructed, for
     -fsanitize=address insert __asan_before_dynamic_init call that
     prevents access to either all global variables that need construction
     in other compilation units, or at least those that haven't been
     initialized yet.  Variables that need dynamic construction in the
     current compilation unit are kept accessible.  */
  if (initp && (flag_sanitize & SANITIZE_ADDRESS))
    finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/false));

  node = vars;
  do
    {
      tree decl = TREE_VALUE (node);
      tree priority_if_stmt;
      int priority;
      priority_info pi;

      /* If we don't need a destructor, there's nothing to do.  Avoid
	 creating a possibly empty if-stmt.  */
      if (!initp && TYPE_HAS_TRIVIAL_DESTRUCTOR (TREE_TYPE (decl)))
	{
	  node = TREE_CHAIN (node);
	  continue;
	}

      /* Remember that we had an initialization or finalization at
	 this priority.  */
      priority = DECL_EFFECTIVE_INIT_PRIORITY (decl);
      pi = get_priority_info (priority);
      if (initp)
	pi->initializations_p = 1;
      else
	pi->destructions_p = 1;

      /* Conditionalize this initialization on being in the right priority
	 and being initializing/finalizing appropriately.  */
      priority_if_stmt = begin_if_stmt ();
      cond = cp_build_binary_op (input_location,
				 EQ_EXPR,
				 priority_decl,
				 build_int_cst (NULL_TREE, priority),
				 tf_warning_or_error);
      finish_if_stmt_cond (cond, priority_if_stmt);

      /* Process initializers with same priority.  The inner loop
	 advances NODE, so the outer do-while resumes at the first
	 decl with a different priority.  */
      for (; node
	     && DECL_EFFECTIVE_INIT_PRIORITY (TREE_VALUE (node)) == priority;
	   node = TREE_CHAIN (node))
	/* Do one initialization or destruction.  */
	one_static_initialization_or_destruction (TREE_VALUE (node),
						  TREE_PURPOSE (node), initp);

      /* Finish up the priority if-stmt body.  */
      finish_then_clause (priority_if_stmt);
      finish_if_stmt (priority_if_stmt);
    }
  while (node);

  /* Revert what __asan_before_dynamic_init did by calling
     __asan_after_dynamic_init.  */
  if (initp && (flag_sanitize & SANITIZE_ADDRESS))
    finish_expr_stmt (asan_dynamic_init_call (/*after_p=*/true));

  /* Finish up the init/destruct if-stmt body.  */
  finish_then_clause (init_if_stmt);
  finish_if_stmt (init_if_stmt);
}

/* VARS is a list of variables with static storage duration which may
   need initialization and/or finalization.  Remove those variables
   that don't really need to be initialized or finalized, and return
   the resulting list.  The order in which the variables appear in
   VARS is in reverse order of the order in which they should actually
   be initialized.  The list we return is in the unreversed order;
   i.e., the first variable should be initialized first.
   Pruned entries are spliced out of *VARS in place; kept entries are
   pushed onto the front of RESULT, which reverses the list.  */

static tree
prune_vars_needing_no_initialization (tree *vars)
{
  tree *var = vars;
  tree result = NULL_TREE;

  while (*var)
    {
      tree t = *var;
      tree decl = TREE_VALUE (t);
      tree init = TREE_PURPOSE (t);

      /* Deal gracefully with error.  */
      if (error_operand_p (decl))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* The only things that can be initialized are variables.  */
      gcc_assert (VAR_P (decl));

      /* If this object is not defined, we don't need to do anything
	 here.  */
      if (DECL_EXTERNAL (decl))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* Also, if the initializer already contains errors, we can bail
	 out now.  */
      if (init && TREE_CODE (init) == TREE_LIST
	  && value_member (error_mark_node, init))
	{
	  var = &TREE_CHAIN (t);
	  continue;
	}

      /* This variable is going to need initialization and/or
	 finalization, so we add it to the list.  */
      *var = TREE_CHAIN (t);
      TREE_CHAIN (t) = result;
      result = t;
    }

  return result;
}

/* Make sure we have told the back end about all the variables in
   VARS.
   */

static void
write_out_vars (tree vars)
{
  tree v;

  for (v = vars; v; v = TREE_CHAIN (v))
    {
      tree var = TREE_VALUE (v);
      /* Only emit each variable once.  */
      if (!var_finalized_p (var))
	{
	  import_export_decl (var);
	  rest_of_decl_compilation (var, 1, 1);
	}
    }
}

/* Generate a static constructor (if CONSTRUCTOR_P) or destructor
   (otherwise) that will initialize all global objects with static
   storage duration having the indicated PRIORITY.  LOCUS points to
   the location used for the generated function.  */

static void
generate_ctor_or_dtor_function (bool constructor_p, int priority,
				location_t *locus)
{
  char function_key;
  tree fndecl;
  tree body;
  size_t i;

  input_location = *locus;
  /* ??? */
  /* Was: locus->line++; */

  /* We use `I' to indicate initialization and `D' to indicate
     destruction.  */
  function_key = constructor_p ? 'I' : 'D';

  /* We emit the function lazily, to avoid generating empty
     global constructors and destructors.  BODY stays NULL_TREE until
     the first statement actually needs to be emitted.  */
  body = NULL_TREE;

  /* For Objective-C++, we may need to initialize metadata found in this module.
     This must be done _before_ any other static initializations.  */
  if (c_dialect_objc () && (priority == DEFAULT_INIT_PRIORITY)
      && constructor_p && objc_static_init_needed_p ())
    {
      body = start_objects (function_key, priority);
      objc_generate_static_init_call (NULL_TREE);
    }

  /* Call the static storage duration function with appropriate
     arguments.  */
  FOR_EACH_VEC_SAFE_ELT (ssdf_decls, i, fndecl)
    {
      /* Calls to pure or const functions will expand to nothing.  */
      if (! (flags_from_decl_or_type (fndecl) & (ECF_CONST | ECF_PURE)))
	{
	  tree call;

	  if (! body)
	    body = start_objects (function_key, priority);
	  call = cp_build_function_call_nary (fndecl, tf_warning_or_error,
					      build_int_cst (NULL_TREE,
							     constructor_p),
					      build_int_cst (NULL_TREE,
							     priority),
					      NULL_TREE);
	  finish_expr_stmt (call);
	}
    }

  /* Close out the function.  */
  if (body)
    finish_objects (function_key, priority, body);
}

/* Generate constructor and destructor functions for the priority
   indicated by N.
   */

static int
generate_ctor_and_dtor_functions_for_priority (splay_tree_node n, void * data)
{
  location_t *locus = (location_t *) data;
  int priority = (int) n->key;
  priority_info pi = (priority_info) n->value;

  /* Generate the functions themselves, but only if they are really
     needed.  */
  if (pi->initializations_p)
    generate_ctor_or_dtor_function (/*constructor_p=*/true, priority, locus);
  if (pi->destructions_p)
    generate_ctor_or_dtor_function (/*constructor_p=*/false, priority, locus);

  /* Keep iterating (a nonzero return would stop the splay-tree
     walk).  */
  return 0;
}

/* Return C++ property of T, based on given operation OP.  Used as the
   dump_ada_specs callback.  */

static int
cpp_check (tree t, cpp_operation op)
{
  switch (op)
    {
      case HAS_DEPENDENT_TEMPLATE_ARGS:
	{
	  tree ti = CLASSTYPE_TEMPLATE_INFO (t);
	  if (!ti)
	    return 0;
	  /* Temporarily enter template processing so the dependency
	     check behaves as it would inside a template.  */
	  ++processing_template_decl;
	  const bool dep = any_dependent_template_arguments_p (TI_ARGS (ti));
	  --processing_template_decl;
	  return dep;
	}
      case IS_ABSTRACT:
	return DECL_PURE_VIRTUAL_P (t);
      case IS_ASSIGNMENT_OPERATOR:
	return DECL_ASSIGNMENT_OPERATOR_P (t);
      case IS_CONSTRUCTOR:
	return DECL_CONSTRUCTOR_P (t);
      case IS_DESTRUCTOR:
	return DECL_DESTRUCTOR_P (t);
      case IS_COPY_CONSTRUCTOR:
	return DECL_COPY_CONSTRUCTOR_P (t);
      case IS_MOVE_CONSTRUCTOR:
	return DECL_MOVE_CONSTRUCTOR_P (t);
      case IS_TEMPLATE:
	return TREE_CODE (t) == TEMPLATE_DECL;
      case IS_TRIVIAL:
	return trivial_type_p (t);
      default:
        return 0;
    }
}

/* Collect source file references recursively, starting from NAMESPC.  */

static void
collect_source_refs (tree namespc)
{
  /* Iterate over names in this name space.  */
  for (tree t = NAMESPACE_LEVEL (namespc)->names; t; t = TREE_CHAIN (t))
    if (DECL_IS_BUILTIN (t))
      ;	/* Skip compiler-internal decls.  */
    else if (TREE_CODE (t) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (t))
      collect_source_refs (t);
    else
      collect_source_ref (DECL_SOURCE_FILE (t));
}

/* Collect decls relevant to SOURCE_FILE from all namespaces recursively,
   starting from NAMESPC.
   */

static void
collect_ada_namespace (tree namespc, const char *source_file)
{
  tree decl = NAMESPACE_LEVEL (namespc)->names;

  /* Collect decls from this namespace.  This will skip
     NAMESPACE_DECLs (both aliases and regular, it cannot tell).  */
  collect_ada_nodes (decl, source_file);

  /* Now scan for namespace children, and dump them.  */
  for (; decl; decl = TREE_CHAIN (decl))
    if (TREE_CODE (decl) == NAMESPACE_DECL && !DECL_NAMESPACE_ALIAS (decl))
      collect_ada_namespace (decl, source_file);
}

/* Returns true iff there is a definition available for variable or
   function DECL.  */

bool
decl_defined_p (tree decl)
{
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return (DECL_INITIAL (decl) != NULL_TREE
	    /* A pending instantiation of a friend temploid is defined.  */
	    || (DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (decl)
		&& DECL_INITIAL (DECL_TEMPLATE_RESULT
				 (DECL_TI_TEMPLATE (decl)))));
  else
    {
      gcc_assert (VAR_P (decl));
      return !DECL_EXTERNAL (decl);
    }
}

/* Nonzero for a VAR_DECL whose value can be used in a constant expression.

      [expr.const]

      An integral constant-expression can only involve ... const
      variables of integral or enumeration types initialized with
      constant expressions ...

      C++0x also allows constexpr variables and temporaries initialized
      with constant expressions.  We handle the former here, but the latter
      are just folded away in cxx_eval_constant_expression.

   The standard does not require that the expression be non-volatile.
   G++ implements the proposed correction in DR 457.  */

bool
decl_constant_var_p (tree decl)
{
  if (!decl_maybe_constant_var_p (decl))
    return false;

  /* We don't know if a template static data member is initialized with
     a constant expression until we instantiate its initializer.  Even
     in the case of a constexpr variable, we can't treat it as a
     constant until its initializer is complete in case it's used in
     its own initializer.  */
  maybe_instantiate_decl (decl);
  return DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl);
}

/* Returns true if DECL could be a symbolic constant variable, depending on
   its initializer.  */

bool
decl_maybe_constant_var_p (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (!VAR_P (decl))
    return false;
  if (DECL_DECLARED_CONSTEXPR_P (decl) && !TREE_THIS_VOLATILE (decl))
    return true;
  if (DECL_HAS_VALUE_EXPR_P (decl))
    /* A proxy isn't constant.  */
    return false;
  if (TYPE_REF_P (type))
    /* References can be constant.  */;
  else if (CP_TYPE_CONST_NON_VOLATILE_P (type)
	   && INTEGRAL_OR_ENUMERATION_TYPE_P (type))
    /* And const integers.  */;
  else
    return false;

  if (DECL_INITIAL (decl)
      && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
    /* We know the initializer, and it isn't constant.  */
    return false;
  else
    return true;
}

/* Complain that DECL uses a type with no linkage.  In C++98 mode this is
   called from grokfndecl and grokvardecl; in all modes it is called from
   cp_write_global_declarations.  */

void
no_linkage_error (tree decl)
{
  if (cxx_dialect >= cxx11
      && (decl_defined_p (decl)
	  /* Treat templates which limit_bad_template_recursion decided
	     not to instantiate as if they were defined.  */
	  || (errorcount + sorrycount > 0
	      && DECL_LANG_SPECIFIC (decl)
	      && DECL_TEMPLATE_INFO (decl)
	      && TREE_NO_WARNING (decl))))
    /* In C++11 it's ok if the decl is defined.  */
    return;

  tree t = no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false);
  if (t == NULL_TREE)
    /* The type that got us on no_linkage_decls must have gotten a name for
       linkage purposes.  */;
  else if (CLASS_TYPE_P (t) && TYPE_BEING_DEFINED (t))
    /* The type might end up having a typedef name for linkage purposes.
       Re-check this decl at the end of the translation unit.  */
    vec_safe_push (no_linkage_decls, decl);
  else if (TYPE_UNNAMED_P (t))
    {
      bool d = false;
      auto_diagnostic_group grp;
      if (cxx_dialect >= cxx11)
	d = permerror (DECL_SOURCE_LOCATION (decl), "%q#D, declared using "
		       "unnamed type, is used but never defined", decl);
      else if (DECL_EXTERN_C_P (decl))
	/* Allow this; it's pretty common in C.  */;
      else if (VAR_P (decl))
	/* DRs 132, 319 and 389 seem to indicate types with no linkage
	   can only be used to declare extern "C" entities.  Since it's not
	   always an error in the ISO C++ 90 Standard, we only issue a
	   warning.  */
	d = warning_at (DECL_SOURCE_LOCATION (decl), 0, "unnamed type "
			"with no linkage used to declare variable %q#D with "
			"linkage", decl);
      else
	d = permerror (DECL_SOURCE_LOCATION (decl), "unnamed type with no "
		       "linkage used to declare function %q#D with linkage",
		       decl);
      if (d && is_typedef_decl (TYPE_NAME (t)))
	inform (DECL_SOURCE_LOCATION (TYPE_NAME (t)), "%q#D does not refer "
		"to the unqualified type, so it is not used for linkage",
		TYPE_NAME (t));
    }
  else if (cxx_dialect >= cxx11)
    {
      if (VAR_P (decl) || !DECL_PURE_VIRTUAL_P (decl))
	permerror (DECL_SOURCE_LOCATION (decl),
		   "%q#D, declared using local type "
		   "%qT, is used but never defined", decl, t);
    }
  else if (VAR_P (decl))
    warning_at (DECL_SOURCE_LOCATION (decl), 0, "type %qT with no linkage "
		"used to declare variable %q#D with linkage", t, decl);
  else
    permerror (DECL_SOURCE_LOCATION (decl), "type %qT with no linkage used "
	       "to declare function %q#D with linkage", t, decl);
}

/* Collect declarations from all namespaces relevant to SOURCE_FILE.  */

static void
collect_all_refs (const char *source_file)
{
  collect_ada_namespace (global_namespace, source_file);
}

/* Clear DECL_EXTERNAL for NODE.  Callback for
   call_for_symbol_thunks_and_aliases; always returns false so the
   walk continues.  */

static bool
clear_decl_external (struct cgraph_node *node, void * /*data*/)
{
  DECL_EXTERNAL (node->decl) = 0;
  return false;
}

/* Build up the function to run dynamic initializers for thread_local
   variables in this translation unit and alias the init functions for the
   individual variables to it.
   */

static void
handle_tls_init (void)
{
  tree vars = prune_vars_needing_no_initialization (&tls_aggregates);
  if (vars == NULL_TREE)
    return;

  location_t loc = DECL_SOURCE_LOCATION (TREE_VALUE (vars));

  write_out_vars (vars);

  /* Per-translation-unit guard: a thread-local boolean that is set
     the first time the TLS init function runs in a thread, so the
     initializations below execute at most once per thread.  */
  tree guard = build_decl (loc, VAR_DECL, get_identifier ("__tls_guard"),
			   boolean_type_node);
  TREE_PUBLIC (guard) = false;
  TREE_STATIC (guard) = true;
  DECL_ARTIFICIAL (guard) = true;
  DECL_IGNORED_P (guard) = true;
  TREE_USED (guard) = true;
  CP_DECL_THREAD_LOCAL_P (guard) = true;
  set_decl_tls_model (guard, decl_default_tls_model (guard));
  pushdecl_top_level_and_finish (guard, NULL_TREE);

  /* Build the TU-local TLS init function:
       if (!__tls_guard) { __tls_guard = true; <initialize each var>; }  */
  tree fn = get_local_tls_init_fn (loc);
  start_preparsed_function (fn, NULL_TREE, SF_PRE_PARSED);
  tree body = begin_function_body ();
  tree if_stmt = begin_if_stmt ();
  tree cond = cp_build_unary_op (TRUTH_NOT_EXPR, guard, false,
				 tf_warning_or_error);
  finish_if_stmt_cond (cond, if_stmt);
  finish_expr_stmt (cp_build_modify_expr (loc, guard, NOP_EXPR,
					  boolean_true_node,
					  tf_warning_or_error));
  for (; vars; vars = TREE_CHAIN (vars))
    {
      tree var = TREE_VALUE (vars);
      tree init = TREE_PURPOSE (vars);
      one_static_initialization_or_destruction (var, init, true);

      /* Output init aliases even with -fno-extern-tls-init.  */
      if (TARGET_SUPPORTS_ALIASES && TREE_PUBLIC (var))
	{
	  tree single_init_fn = get_tls_init_fn (var);
	  if (single_init_fn == NULL_TREE)
	    continue;
	  /* The per-variable _ZTH* init function becomes a same-body
	     alias of the TU-wide init function.  */
	  cgraph_node *alias
	    = cgraph_node::get_create (fn)->create_same_body_alias
		(single_init_fn, fn);
	  gcc_assert (alias != NULL);
	}
    }

  finish_then_clause (if_stmt);
  finish_if_stmt (if_stmt);
  finish_function_body (body);
  expand_or_defer_fn (finish_function (/*inline_p=*/false));
}

/* We're at the end of compilation, so generate any mangling aliases that
   we've been saving up, if DECL is going to be output and ID2 isn't
   already taken by another declaration.
   */

static void
generate_mangling_alias (tree decl, tree id2)
{
  struct cgraph_node *n = NULL;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      n = cgraph_node::get (decl);
      if (!n)
	/* Don't create an alias to an unreferenced function.  */
	return;
    }

  tree *slot
    = mangled_decls->find_slot_with_hash (id2, IDENTIFIER_HASH_VALUE (id2),
					  INSERT);

  /* If there's a declaration already using this mangled name,
     don't create a compatibility alias that conflicts.  */
  if (*slot)
    return;

  tree alias = make_alias_for (decl, id2);
  /* Record the alias in the slot so later mangles of ID2 see it.  */
  *slot = alias;

  /* The alias mirrors DECL's visibility and linkage.  */
  DECL_IGNORED_P (alias) = 1;
  TREE_PUBLIC (alias) = TREE_PUBLIC (decl);
  DECL_VISIBILITY (alias) = DECL_VISIBILITY (decl);
  if (vague_linkage_p (decl))
    DECL_WEAK (alias) = 1;

  if (n)
    n->create_same_body_alias (alias, decl);
  else
    varpool_node::create_extra_name_alias (alias, decl);
}

/* Note that we might want to emit an alias with the symbol ID2 for DECL
   at the end of translation, for compatibility across bugs in the
   mangling implementation.  */

void
note_mangling_alias (tree decl, tree id2)
{
  if (TARGET_SUPPORTS_ALIASES)
    {
      if (!defer_mangling_aliases)
	generate_mangling_alias (decl, id2);
      else
	{
	  /* Pairs (decl, id2) are pushed flat onto the vector and
	     popped in matching pairs by generate_mangling_aliases.  */
	  vec_safe_push (mangling_aliases, decl);
	  vec_safe_push (mangling_aliases, id2);
	}
    }
}

/* Emit all mangling aliases that were deferred up to this point.  */

void
generate_mangling_aliases ()
{
  while (!vec_safe_is_empty (mangling_aliases))
    {
      /* Pop in reverse of push order: id2 first, then decl.  */
      tree id2 = mangling_aliases->pop();
      tree decl = mangling_aliases->pop();
      generate_mangling_alias (decl, id2);
    }
  defer_mangling_aliases = false;
}

/* Record a mangling of DECL, whose DECL_ASSEMBLER_NAME has just been
   set.  NEED_WARNING is true if we must warn about collisions.  We do
   this to spot changes in mangling that may require compatibility
   aliases.
   */

void
record_mangling (tree decl, bool need_warning)
{
  /* Lazily create the mangled-name hash table on first use.  */
  if (!mangled_decls)
    mangled_decls = hash_table<mangled_decl_hash>::create_ggc (499);

  gcc_checking_assert (DECL_ASSEMBLER_NAME_SET_P (decl));
  tree id = DECL_ASSEMBLER_NAME_RAW (decl);
  tree *slot
    = mangled_decls->find_slot_with_hash (id, IDENTIFIER_HASH_VALUE (id),
					  INSERT);

  /* If this is already an alias, remove the alias, because the real
     decl takes precedence.  */
  if (*slot && DECL_ARTIFICIAL (*slot) && DECL_IGNORED_P (*slot))
    if (symtab_node *n = symtab_node::get (*slot))
      if (n->cpp_implicit_alias)
	{
	  n->remove ();
	  *slot = NULL_TREE;
	}

  if (!*slot)
    *slot = decl;
  else if (need_warning)
    {
      error_at (DECL_SOURCE_LOCATION (decl),
		"mangling of %q#D as %qE conflicts with a previous mangle",
		decl, id);
      inform (DECL_SOURCE_LOCATION (*slot),
	      "previous mangling %q#D", *slot);
      inform (DECL_SOURCE_LOCATION (decl),
	      "a later %<-fabi-version=%> (or =0)"
	      " avoids this error with a change in mangling");
      /* After diagnosing, let the new decl own the mangled name.  */
      *slot = decl;
    }
}

/* The mangled name of DECL is being forcibly changed to NAME.  Remove
   any existing knowledge of DECL's mangled name meaning DECL.  */

void
overwrite_mangling (tree decl, tree name)
{
  if (tree id = DECL_ASSEMBLER_NAME_RAW (decl))
    if ((TREE_CODE (decl) == VAR_DECL
	 || TREE_CODE (decl) == FUNCTION_DECL)
	&& mangled_decls)
      if (tree *slot
	  = mangled_decls->find_slot_with_hash (id,
						IDENTIFIER_HASH_VALUE (id),
						NO_INSERT))
	if (*slot == decl)
	  {
	    mangled_decls->clear_slot (slot);

	    /* If this is an alias, remove it from the symbol table.  */
	    if (DECL_ARTIFICIAL (decl) && DECL_IGNORED_P (decl))
	      if (symtab_node *n = symtab_node::get (decl))
		if (n->cpp_implicit_alias)
		  n->remove ();
	  }

  DECL_ASSEMBLER_NAME_RAW (decl) = name;
}

/* The entire file is now complete.  If requested, dump everything
   to a file.
   */

static void
dump_tu (void)
{
  dump_flags_t flags;
  if (FILE *stream = dump_begin (raw_dump_id, &flags))
    {
      dump_node (global_namespace, flags & ~TDF_SLIM, stream);
      dump_end (raw_dump_id, stream);
    }
}

/* Location recorded when parsing finished, used by the end-of-TU code
   to attribute generated functions sensibly.  */
static location_t locus_at_end_of_parsing;

/* Check the deallocation functions for CODE to see if we want to warn that
   only one was defined.  */

static void
maybe_warn_sized_delete (enum tree_code code)
{
  tree sized = NULL_TREE;
  tree unsized = NULL_TREE;

  for (ovl_iterator iter (get_global_binding (ovl_op_identifier (false, code)));
       iter; ++iter)
    {
      tree fn = *iter;
      /* We're only interested in usual deallocation functions.  */
      if (!usual_deallocation_fn_p (fn))
	continue;
      if (FUNCTION_ARG_CHAIN (fn) == void_list_node)
	unsized = fn;
      else
	sized = fn;
    }
  /* NOTE(review): this dereferences both UNSIZED and SIZED without a
     null check — presumably the implicit declarations of the usual
     (sized and unsized) global operator delete guarantee both are
     found above when sized deallocation is enabled; verify against
     the caller's flag_sized_deallocation gate.  */
  if (DECL_INITIAL (unsized) && !DECL_INITIAL (sized))
    warning_at (DECL_SOURCE_LOCATION (unsized), OPT_Wsized_deallocation,
		"the program should also define %qD", sized);
  else if (!DECL_INITIAL (unsized) && DECL_INITIAL (sized))
    warning_at (DECL_SOURCE_LOCATION (sized), OPT_Wsized_deallocation,
		"the program should also define %qD", unsized);
}

/* Check the global deallocation functions to see if we want to warn about
   defining unsized without sized (or vice versa).  */

static void
maybe_warn_sized_delete ()
{
  if (!flag_sized_deallocation || !warn_sized_deallocation)
    return;
  maybe_warn_sized_delete (DELETE_EXPR);
  maybe_warn_sized_delete (VEC_DELETE_EXPR);
}

/* Earlier we left PTRMEM_CST in variable initializers alone so that we could
   look them up when evaluating non-type template parameters.  Now we need to
   lower them to something the back end can understand.  */

static void
lower_var_init ()
{
  varpool_node *node;
  FOR_EACH_VARIABLE (node)
    {
      tree d = node->decl;
      if (tree init = DECL_INITIAL (d))
	DECL_INITIAL (d) = cplus_expand_constant (init);
    }
}

/* This routine is called at the end of compilation.
   Its job is to create all the code needed to initialize and
   destroy the global aggregates.
   We do the destruction first, since that way we only need to reverse
   the decls once.  */

/* Called once the whole translation unit has been parsed: instantiate
   pending templates, emit required vtables and type-info, build the
   static-storage-duration init/destroy functions, and iterate until no
   new work is generated.  */

void
c_parse_final_cleanups (void)
{
  tree vars;
  bool reconsider;
  size_t i;
  unsigned ssdf_count = 0;   /* Ordinal of the next __static_initialization
				function to emit.  */
  int retries = 0;           /* Number of passes already made; passed to the
				template instantiator.  */
  tree decl;

  locus_at_end_of_parsing = input_location;
  at_eof = 1;

  /* Bad parse errors.  Just forget about it.  */
  if (! global_bindings_p () || current_class_type
      || !vec_safe_is_empty (decl_namespace_list))
    return;

  /* This is the point to write out a PCH if we're doing that.
     In that case we do not want to do anything else.  */
  if (pch_file)
    {
      /* Mangle all symbols at PCH creation time.  */
      symtab_node *node;
      FOR_EACH_SYMBOL (node)
	if (! is_a <varpool_node *> (node)
	    || ! DECL_HARD_REGISTER (node->decl))
	  DECL_ASSEMBLER_NAME (node->decl);
      c_common_write_pch ();
      dump_tu ();
      /* Ensure even the callers don't try to finalize the CU.  */
      flag_syntax_only = 1;
      return;
    }

  timevar_stop (TV_PHASE_PARSING);
  timevar_start (TV_PHASE_DEFERRED);

  symtab->process_same_body_aliases ();

  /* Handle -fdump-ada-spec[-slim] */
  if (flag_dump_ada_spec || flag_dump_ada_spec_slim)
    {
      collect_source_ref (main_input_filename);
      if (!flag_dump_ada_spec_slim)
	collect_source_refs (global_namespace);

      dump_ada_specs (collect_all_refs, cpp_check);
    }

  /* FIXME - huh?  was  input_line -= 1;*/

  /* We now have to write out all the stuff we put off writing out.
     These include:

       o Template specializations that we have not yet instantiated,
	 but which are needed.
       o Initialization and destruction for non-local objects with
	 static storage duration.  (Local objects with static storage
	 duration are initialized when their scope is first entered,
	 and are cleaned up via atexit.)
       o Virtual function tables.

     All of these may cause others to be needed.  For example,
     instantiating one function may cause another to be needed, and
     generating the initializer for an object may cause templates to be
     instantiated, etc., etc.  */

  emit_support_tinfos ();

  /* Fixed-point loop: emitting one entity can make others needed, so
     keep going until a full pass produces no new work.  */
  do
    {
      tree t;
      tree decl;

      reconsider = false;

      /* If there are templates that we've put off instantiating, do
	 them now.  */
      instantiate_pending_templates (retries);
      ggc_collect ();

      /* Write out virtual tables as required.  Writing out the
	 virtual table for a template class may cause the
	 instantiation of members of that class.  If we write out
	 vtables then we remove the class from our list so we don't
	 have to look at it again.  */
      for (i = keyed_classes->length ();
	   keyed_classes->iterate (--i, &t);)
	if (maybe_emit_vtables (t))
	  {
	    reconsider = true;
	    keyed_classes->unordered_remove (i);
	  }

      /* The input_location may have been changed during marking of
	 vtable entries.  */
      input_location = locus_at_end_of_parsing;

      /* Write out needed type info variables.  We have to be careful
	 looping through unemitted decls, because emit_tinfo_decl may
	 cause other variables to be needed.  New elements will be
	 appended, and we remove from the vector those that actually
	 get emitted.  */
      for (i = unemitted_tinfo_decls->length ();
	   unemitted_tinfo_decls->iterate (--i, &t);)
	if (emit_tinfo_decl (t))
	  {
	    reconsider = true;
	    unemitted_tinfo_decls->unordered_remove (i);
	  }

      /* The list of objects with static storage duration is built up
	 in reverse order.  We clear STATIC_AGGREGATES so that any new
	 aggregates added during the initialization of these will be
	 initialized in the correct order when we next come around the
	 loop.  */
      vars = prune_vars_needing_no_initialization (&static_aggregates);

      if (vars)
	{
	  /* We need to start a new initialization function each time
	     through the loop.  That's because we need to know which
	     vtables have been referenced, and TREE_SYMBOL_REFERENCED
	     isn't computed until a function is finished, and written
	     out.  That's a deficiency in the back end.  When this is
	     fixed, these initialization functions could all become
	     inline, with resulting performance improvements.  */
	  tree ssdf_body;

	  /* Make sure the back end knows about all the variables.  */
	  write_out_vars (vars);

	  /* Set the line and file, so that it is obviously not from
	     the source file.  */
	  input_location = locus_at_end_of_parsing;
	  ssdf_body = start_static_storage_duration_function (ssdf_count);

	  /* First generate code to do all the initializations.  */
	  if (vars)
	    do_static_initialization_or_destruction (vars, /*initp=*/true);

	  /* Then, generate code to do all the destructions.  Do these
	     in reverse order so that the most recently constructed
	     variable is the first destroyed.  If we're using
	     __cxa_atexit, then we don't need to do this; functions
	     were registered at initialization time to destroy the
	     local statics.  */
	  if (!flag_use_cxa_atexit && vars)
	    {
	      vars = nreverse (vars);
	      do_static_initialization_or_destruction (vars, /*initp=*/false);
	    }
	  else
	    vars = NULL_TREE;

	  /* Finish up the static storage duration function for this
	     round.  */
	  input_location = locus_at_end_of_parsing;
	  finish_static_storage_duration_function (ssdf_body);

	  /* All those initializations and finalizations might cause
	     us to need more inline functions, more template
	     instantiations, etc.  */
	  reconsider = true;
	  ssdf_count++;
	  /* ??? was:  locus_at_end_of_parsing.line++; */
	}

      /* Now do the same for thread_local variables.  */
      handle_tls_init ();

      /* Go through the set of inline functions whose bodies have not
	 been emitted yet.  If out-of-line copies of these functions
	 are required, emit them.  */
      FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
	{
	  /* Does it need synthesizing?  */
	  if (DECL_DEFAULTED_FN (decl) && ! DECL_INITIAL (decl)
	      && (! DECL_REALLY_EXTERN (decl) || possibly_inlined_p (decl)))
	    {
	      /* Even though we're already at the top-level, we push
		 there again.  That way, when we pop back a few lines
		 hence, all of our state is restored.  Otherwise,
		 finish_function doesn't clean things up, and we end
		 up with CURRENT_FUNCTION_DECL set.  */
	      push_to_top_level ();
	      /* The decl's location will mark where it was first
		 needed.  Save that so synthesize method can indicate
		 where it was needed from, in case of error  */
	      input_location = DECL_SOURCE_LOCATION (decl);
	      synthesize_method (decl);
	      pop_from_top_level ();
	      reconsider = true;
	    }

	  if (!DECL_INITIAL (decl) && decl_tls_wrapper_p (decl))
	    generate_tls_wrapper (decl);

	  if (!DECL_SAVED_TREE (decl))
	    continue;

	  cgraph_node *node = cgraph_node::get_create (decl);

	  /* We lie to the back end, pretending that some functions
	     are not defined when they really are.  This keeps these
	     functions from being put out unnecessarily.  But, we must
	     stop lying when the functions are referenced, or if they
	     are not comdat since they need to be put out now.  If
	     DECL_INTERFACE_KNOWN, then we have already set
	     DECL_EXTERNAL appropriately, so there's no need to check
	     again, and we do not want to clear DECL_EXTERNAL if a
	     previous call to import_export_decl set it.

	     This is done in a separate for cycle, because if some
	     deferred function is contained in another deferred
	     function later in deferred_fns varray,
	     rest_of_compilation would skip this function and we
	     really cannot expand the same function twice.  */
	  import_export_decl (decl);
	  if (DECL_NOT_REALLY_EXTERN (decl)
	      && DECL_INITIAL (decl)
	      && decl_needed_p (decl))
	    {
	      if (node->cpp_implicit_alias)
		node = node->get_alias_target ();

	      node->call_for_symbol_thunks_and_aliases (clear_decl_external,
							NULL, true);
	      /* If we mark !DECL_EXTERNAL one of the symbols in some
		 comdat group, we need to mark all symbols in the same
		 comdat group that way.  */
	      if (node->same_comdat_group)
		for (cgraph_node *next
		       = dyn_cast<cgraph_node *> (node->same_comdat_group);
		     next != node;
		     next = dyn_cast<cgraph_node *> (next->same_comdat_group))
		  next->call_for_symbol_thunks_and_aliases (clear_decl_external,
							    NULL, true);
	    }

	  /* If we're going to need to write this function out, and
	     there's already a body for it, create RTL for it now.
	     (There might be no body if this is a method we haven't
	     gotten around to synthesizing yet.)  */
	  if (!DECL_EXTERNAL (decl)
	      && decl_needed_p (decl)
	      && !TREE_ASM_WRITTEN (decl)
	      && !node->definition)
	    {
	      /* We will output the function; no longer consider it in this
		 loop.  */
	      DECL_DEFER_OUTPUT (decl) = 0;
	      /* Generate RTL for this function now that we know we
		 need it.  */
	      expand_or_defer_fn (decl);
	      reconsider = true;
	    }
	}

      if (wrapup_namespace_globals ())
	reconsider = true;

      /* Static data members are just like namespace-scope globals.  */
      FOR_EACH_VEC_SAFE_ELT (pending_statics, i, decl)
	{
	  if (var_finalized_p (decl)
	      || DECL_REALLY_EXTERN (decl)
	      /* Don't write it out if we haven't seen a definition.  */
	      || DECL_IN_AGGR_P (decl))
	    continue;
	  import_export_decl (decl);
	  /* If this static data member is needed, provide it to the
	     back end.  */
	  if (DECL_NOT_REALLY_EXTERN (decl) && decl_needed_p (decl))
	    DECL_EXTERNAL (decl) = 0;
	}
      if (vec_safe_length (pending_statics) != 0
	  && wrapup_global_declarations (pending_statics->address (),
					 pending_statics->length ()))
	reconsider = true;

      retries++;
    }
  while (reconsider);

  lower_var_init ();

  generate_mangling_aliases ();

  /* All used inline functions must have a definition at this point.  */
  FOR_EACH_VEC_SAFE_ELT (deferred_fns, i, decl)
    {
      if (/* Check online inline functions that were actually used.  */
	  DECL_ODR_USED (decl) && DECL_DECLARED_INLINE_P (decl)
	  /* If the definition actually was available here, then the
	     fact that the function was not defined merely represents
	     that for some reason (use of a template repository,
	     #pragma interface, etc.) we decided not to emit the
	     definition here.  */
	  && !DECL_INITIAL (decl)
	  /* Don't complain if the template was defined.  */
	  && !(DECL_TEMPLATE_INSTANTIATION (decl)
	       && DECL_INITIAL (DECL_TEMPLATE_RESULT
				(template_for_substitution (decl))))
	  && warning_at (DECL_SOURCE_LOCATION (decl), 0,
			 "inline function %qD used but never defined", decl))
	/* Avoid a duplicate warning from check_global_declaration.  */
	TREE_NO_WARNING (decl) = 1;
    }

  /* So must decls that use a type with no linkage.  */
  FOR_EACH_VEC_SAFE_ELT (no_linkage_decls, i, decl)
    no_linkage_error (decl);

  maybe_warn_sized_delete ();

  /* Then, do the Objective-C stuff.  This is where all the
     Objective-C module stuff gets generated (symtab,
     class/protocol/selector lists etc).  This must be done after C++
     templates, destructors etc. so that selectors used in C++
     templates are properly allocated.  */
  if (c_dialect_objc ())
    objc_write_global_declarations ();

  /* We give C linkage to static constructors and destructors.  */
  push_lang_context (lang_name_c);

  /* Generate initialization and destruction functions for all
     priorities for which they are required.  */
  if (priority_info_map)
    splay_tree_foreach (priority_info_map,
			generate_ctor_and_dtor_functions_for_priority,
			/*data=*/&locus_at_end_of_parsing);
  else if (c_dialect_objc () && objc_static_init_needed_p ())
    /* If this is obj-c++ and we need a static init, call
       generate_ctor_or_dtor_function.  */
    generate_ctor_or_dtor_function (/*constructor_p=*/true,
				    DEFAULT_INIT_PRIORITY,
				    &locus_at_end_of_parsing);

  /* We're done with the splay-tree now.  */
  if (priority_info_map)
    splay_tree_delete (priority_info_map);

  /* Generate any missing aliases.  */
  maybe_apply_pending_pragma_weaks ();

  /* We're done with static constructors, so we can go back to "C++"
     linkage now.  */
  pop_lang_context ();

  if (flag_vtable_verify)
    {
      vtv_recover_class_info ();
      vtv_compute_class_hierarchy_transitive_closure ();
      vtv_build_vtable_verify_fndecl ();
    }

  perform_deferred_noexcept_checks ();

  fini_constexpr ();

  /* The entire file is now complete.  If requested, dump everything
     to a file.  */
  dump_tu ();

  if (flag_detailed_statistics)
    {
      dump_tree_statistics ();
      dump_time_statistics ();
    }

  timevar_stop (TV_PHASE_DEFERRED);
  timevar_start (TV_PHASE_PARSING);

  /* Indicate that we're done with front end processing.  */
  at_eof = 2;
}

/* Perform any post compilation-proper cleanups for the C++ front-end.
   This should really go away.  No front-end should need to do
   anything past the compilation process.
*/

void
cxx_post_compilation_parsing_cleanups (void)
{
  timevar_start (TV_PHASE_LATE_PARSING_CLEANUPS);

  if (flag_vtable_verify)
    {
      /* Generate the special constructor initialization function that
	 calls __VLTRegisterPairs, and give it a very high
	 initialization priority.  This must be done after
	 finalize_compilation_unit so that we have accurate
	 information about which vtable will actually be emitted.  */
      vtv_generate_init_routine ();
    }

  input_location = locus_at_end_of_parsing;

  /* The conversion obstack consistency check is only done in
     checking-enabled builds.  */
  if (flag_checking)
    validate_conversion_obstack ();

  timevar_stop (TV_PHASE_LATE_PARSING_CLEANUPS);
}

/* FN is an OFFSET_REF, DOTSTAR_EXPR or MEMBER_REF indicating the
   function to call in parse-tree form; it has not yet been
   semantically analyzed.  ARGS are the arguments to the function.
   They have already been semantically analyzed.  This may change
   ARGS.  */

tree
build_offset_ref_call_from_tree (tree fn, vec<tree, va_gc> **args,
				 tsubst_flags_t complain)
{
  tree orig_fn;
  vec<tree, va_gc> *orig_args = NULL;
  tree expr;
  tree object;

  /* Remember the original callee and object; ORIG_FN is needed to
     rebuild the call for template processing below.  */
  orig_fn = fn;
  object = TREE_OPERAND (fn, 0);

  if (processing_template_decl)
    {
      gcc_assert (TREE_CODE (fn) == DOTSTAR_EXPR
		  || TREE_CODE (fn) == MEMBER_REF);
      /* If anything is still dependent, just build an unanalyzed call
	 node to be instantiated later.  */
      if (type_dependent_expression_p (fn)
	  || any_type_dependent_arguments_p (*args))
	return build_min_nt_call_vec (fn, *args);

      orig_args = make_tree_vector_copy (*args);

      /* Transform the arguments and add the implicit "this"
	 parameter.  That must be done before the FN is transformed
	 because we depend on the form of FN.  */
      make_args_non_dependent (*args);
      object = build_non_dependent_expr (object);
      if (TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE)
	{
	  if (TREE_CODE (fn) == DOTSTAR_EXPR)
	    object = cp_build_addr_expr (object, complain);
	  vec_safe_insert (*args, 0, object);
	}
      /* Now that the arguments are done, transform FN.  */
      fn = build_non_dependent_expr (fn);
    }

  /* A qualified name corresponding to a bound pointer-to-member is
     represented as an OFFSET_REF:

	struct B { void g(); };
	void (B::*p)();
	void B::g() { (this->*p)(); }  */
  if (TREE_CODE (fn) == OFFSET_REF)
    {
      tree object_addr = cp_build_addr_expr (object, complain);
      fn = TREE_OPERAND (fn, 1);
      fn = get_member_function_from_ptrfunc (&object_addr, fn,
					     complain);
      vec_safe_insert (*args, 0, object_addr);
    }

  /* A class-type callee goes through operator(); anything else is an
     ordinary call.  */
  if (CLASS_TYPE_P (TREE_TYPE (fn)))
    expr = build_op_call (fn, args, complain);
  else
    expr = cp_build_function_call_vec (fn, args, complain);
  if (processing_template_decl && expr != error_mark_node)
    expr = build_min_non_dep_call_vec (expr, orig_fn, orig_args);

  if (orig_args != NULL)
    release_tree_vector (orig_args);

  return expr;
}

/* Check the parameter list of function (or function template) X:
   diagnose any parameter that lacks a default argument but follows a
   parameter that has one, and poison the offending default argument
   with error_mark_node.  */

void
check_default_args (tree x)
{
  tree arg = TYPE_ARG_TYPES (TREE_TYPE (x));
  bool saw_def = false;
  /* Have we already pointed at the first defaulted parameter?  */
  bool noted_first_def = false;
  int idx_of_first_default_arg = 0;
  location_t loc_of_first_default_arg = UNKNOWN_LOCATION;
  /* Start at -1 for a METHOD_TYPE so the implicit object parameter is
     not counted in the diagnostics.  */
  int i = 0 - (TREE_CODE (TREE_TYPE (x)) == METHOD_TYPE);
  tree fndecl = STRIP_TEMPLATE (x);
  auto_diagnostic_group d;
  for (; arg && arg != void_list_node; arg = TREE_CHAIN (arg), ++i)
    {
      /* TREE_PURPOSE holds the default argument, if any.  */
      if (TREE_PURPOSE (arg))
	{
	  if (!saw_def)
	    {
	      saw_def = true;
	      idx_of_first_default_arg = i;
	      location_t loc = get_fndecl_argument_location (fndecl, i);
	      if (loc != DECL_SOURCE_LOCATION (x))
		loc_of_first_default_arg = loc;
	    }
	}
      /* A parameter pack may validly follow defaulted parameters, so
	 do not diagnose it.  */
      else if (saw_def && !PACK_EXPANSION_P (TREE_VALUE (arg)))
	{
	  error_at (get_fndecl_argument_location (fndecl, i),
		    "default argument missing for parameter %P of %q#D",
		    i, x);
	  if (loc_of_first_default_arg != UNKNOWN_LOCATION
	      && !noted_first_def)
	    {
	      inform (loc_of_first_default_arg,
		      "...following parameter %P which has a default "
		      "argument", idx_of_first_default_arg);
	      noted_first_def = true;
	    }
	  TREE_PURPOSE (arg) = error_mark_node;
	}
    }
}

/* Return true if function DECL can be inlined.  This is used to force
   instantiation of methods that might be interesting for inlining.
*/

bool
possibly_inlined_p (tree decl)
{
  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  if (DECL_UNINLINABLE (decl))
    return false;
  /* Without optimization, only explicitly inline functions are
     candidates.  */
  if (!optimize)
    return DECL_DECLARED_INLINE_P (decl);
  /* When optimizing, we might inline everything when flatten
     attribute or heuristics inlining for size or autoinlining
     is used.  */
  return true;
}

/* Normally, we can wait until instantiation-time to synthesize DECL.
   However, if DECL is a static data member initialized with a constant
   or a constexpr function, we need it right now because a reference to
   such a data member or a call to such function is not value-dependent.
   For a function that uses auto in the return type, we need to instantiate
   it to find out its type.  For OpenMP user defined reductions, we need
   them instantiated for reduction clauses which inline them by hand
   directly.  */

static void
maybe_instantiate_decl (tree decl)
{
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_TEMPLATE_INFO (decl)
      && (decl_maybe_constant_var_p (decl)
	  || (TREE_CODE (decl) == FUNCTION_DECL
	      && DECL_OMP_DECLARE_REDUCTION_P (decl))
	  || undeduced_auto_decl (decl))
      && !DECL_DECLARED_CONCEPT_P (decl)
      /* Only instantiate once all template arguments are concrete.  */
      && !uses_template_parms (DECL_TI_ARGS (decl)))
    {
      /* Instantiating a function will result in garbage collection.  We
	 must treat this situation as if we were within the body of a
	 function so as to avoid collecting live data only referenced from
	 the stack (such as overload resolution candidates).  */
      ++function_depth;
      instantiate_decl (decl, /*defer_ok=*/false,
			/*expl_inst_class_mem_p=*/false);
      --function_depth;
    }
}

/* Maybe warn if DECL is deprecated, subject to COMPLAIN.  Returns whether or
   not a warning was emitted.  */

bool
cp_warn_deprecated_use (tree decl, tsubst_flags_t complain)
{
  if (!(complain & tf_warning) || !decl
      || deprecated_state == DEPRECATED_SUPPRESS)
    return false;

  if (!TREE_DEPRECATED (decl))
    {
      /* Perhaps this is a deprecated typedef.  */
      if (TYPE_P (decl) && TYPE_NAME (decl))
	decl = TYPE_NAME (decl);

      if (!TREE_DEPRECATED (decl))
	return false;
    }

  /* Don't warn within members of a deprecated type.  */
  if (TYPE_P (decl)
      && currently_open_class (decl))
    return false;

  bool warned = false;
  /* Implicitly-declared copy special members of a class with a
     user-provided copy or destructor get the -Wdeprecated-copy
     treatment (C++11 and later).  */
  if (cxx_dialect >= cxx11
      && DECL_P (decl)
      && DECL_ARTIFICIAL (decl)
      && DECL_NONSTATIC_MEMBER_FUNCTION_P (decl)
      && copy_fn_p (decl))
    {
      if (warn_deprecated_copy
	  /* Don't warn about system library classes (c++/86342).  */
	  && (!DECL_IN_SYSTEM_HEADER (decl)
	      || global_dc->dc_warn_system_headers))
	{
	  auto_diagnostic_group d;
	  tree ctx = DECL_CONTEXT (decl);
	  tree other = classtype_has_depr_implicit_copy (ctx);
	  int opt = (DECL_DESTRUCTOR_P (other)
		     ? OPT_Wdeprecated_copy_dtor
		     : OPT_Wdeprecated_copy);
	  warned = warning (opt, "implicitly-declared %qD is deprecated",
			    decl);
	  if (warned)
	    inform (DECL_SOURCE_LOCATION (other),
		    "because %qT has user-provided %qD",
		    ctx, other);
	}
    }
  else
    warned = warn_deprecated_use (decl, NULL_TREE);

  return warned;
}

/* Like above, but takes into account outer scopes.  */

void
cp_warn_deprecated_use_scopes (tree scope)
{
  /* Walk outward from SCOPE toward the global namespace, warning at
     the first deprecated namespace or class scope found.  */
  while (scope
	 && scope != error_mark_node
	 && scope != global_namespace)
    {
      if ((TREE_CODE (scope) == NAMESPACE_DECL || OVERLOAD_TYPE_P (scope))
	  && cp_warn_deprecated_use (scope))
	return;
      if (TYPE_P (scope))
	scope = CP_TYPE_CONTEXT (scope);
      else
	scope = CP_DECL_CONTEXT (scope);
    }
}

/* True if DECL or its enclosing scope have unbound template parameters.  */

bool
decl_dependent_p (tree decl)
{
  /* These decls carry no template info themselves; look at their
     enclosing context instead.  */
  if (DECL_FUNCTION_SCOPE_P (decl)
      || TREE_CODE (decl) == CONST_DECL
      || TREE_CODE (decl) == USING_DECL
      || TREE_CODE (decl) == FIELD_DECL)
    decl = CP_DECL_CONTEXT (decl);
  if (tree tinfo = get_template_info (decl))
    if (any_dependent_template_arguments_p (TI_ARGS (tinfo)))
      return true;
  if (LAMBDA_FUNCTION_P (decl)
      && dependent_type_p (DECL_CONTEXT (decl)))
    return true;
  return false;
}

/* Mark DECL (either a _DECL or a BASELINK) as "used" in the program.
   If DECL is a specialization or implicitly declared class member,
   generate the actual definition.  Return false if something goes
   wrong, true otherwise.  */

bool
mark_used (tree decl, tsubst_flags_t complain)
{
  /* If we're just testing conversions or resolving overloads, we
     don't want any permanent effects like forcing functions to be
     output or instantiating templates.  */
  if ((complain & tf_conv))
    return true;

  /* If DECL is a BASELINK for a single function, then treat it just
     like the DECL for the function.  Otherwise, if the BASELINK is
     for an overloaded function, we don't know which function was
     actually used until after overload resolution.  */
  if (BASELINK_P (decl))
    {
      decl = BASELINK_FUNCTIONS (decl);
      if (really_overloaded_fn (decl))
	return true;
      decl = OVL_FIRST (decl);
    }

  if (!DECL_P (decl))
    return true;

  /* Set TREE_USED for the benefit of -Wunused.  */
  TREE_USED (decl) = 1;
  /* And for structured bindings also the underlying decl.  */
  if (DECL_DECOMPOSITION_P (decl) && DECL_DECOMP_BASE (decl))
    TREE_USED (DECL_DECOMP_BASE (decl)) = 1;

  if (TREE_CODE (decl) == TEMPLATE_DECL)
    return true;

  if (DECL_CLONED_FUNCTION_P (decl))
    TREE_USED (DECL_CLONED_FUNCTION (decl)) = 1;

  /* Mark enumeration types as used.  */
  if (TREE_CODE (decl) == CONST_DECL)
    used_types_insert (DECL_CONTEXT (decl));

  if (TREE_CODE (decl) == FUNCTION_DECL
      && !maybe_instantiate_noexcept (decl, complain))
    return false;

  /* Using a deleted function is an error (or a sorry for the lambda
     conversion operator case).  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DELETED_FN (decl))
    {
      if (DECL_ARTIFICIAL (decl)
	  && DECL_CONV_FN_P (decl)
	  && LAMBDA_TYPE_P (DECL_CONTEXT (decl)))
	/* We mark a lambda conversion op as deleted if we can't
	   generate it properly; see maybe_add_lambda_conv_op.  */
	sorry ("converting lambda that uses %<...%> to function pointer");
      else if (complain & tf_error)
	{
	  error ("use of deleted function %qD", decl);
	  if (!maybe_explain_implicit_delete (decl))
	    inform (DECL_SOURCE_LOCATION (decl), "declared here");
	}
      return false;
    }

  cp_warn_deprecated_use (decl, complain);

  /* We can only check DECL_ODR_USED on variables or functions with
     DECL_LANG_SPECIFIC set, and these are also the only decls that we
     might need special handling for.  */
  if (!VAR_OR_FUNCTION_DECL_P (decl)
      || DECL_LANG_SPECIFIC (decl) == NULL
      || DECL_THUNK_P (decl))
    {
      if (!decl_dependent_p (decl)
	  && !require_deduced_type (decl, complain))
	return false;
      return true;
    }

  /* We only want to do this processing once.  We don't need to keep trying
     to instantiate inline templates, because unit-at-a-time will make sure
     we get them compiled before functions that want to inline them.  */
  if (DECL_ODR_USED (decl))
    return true;

  /* Using a function whose constraints are not satisfied is an error
     (C++20 concepts).  */
  if (flag_concepts && TREE_CODE (decl) == FUNCTION_DECL
      && !constraints_satisfied_p (decl))
    {
      if (complain & tf_error)
	{
	  auto_diagnostic_group d;
	  error ("use of function %qD with unsatisfied constraints",
		 decl);
	  location_t loc = DECL_SOURCE_LOCATION (decl);
	  inform (loc, "declared here");
	  diagnose_constraints (loc, decl, NULL_TREE);
	}
      return false;
    }

  /* Normally, we can wait until instantiation-time to synthesize DECL.
     However, if DECL is a static data member initialized with a constant
     or a constexpr function, we need it right now because a reference to
     such a data member or a call to such function is not value-dependent.
     For a function that uses auto in the return type, we need to instantiate
     it to find out its type.  For OpenMP user defined reductions, we need
     them instantiated for reduction clauses which inline them by hand
     directly.  */
  maybe_instantiate_decl (decl);

  if (processing_template_decl || in_template_function ())
    return true;

  /* Check this too in case we're within instantiate_non_dependent_expr.  */
  if (DECL_TEMPLATE_INFO (decl)
      && uses_template_parms (DECL_TI_ARGS (decl)))
    return true;

  if (!require_deduced_type (decl, complain))
    return false;

  if (builtin_pack_fn_p (decl))
    {
      error ("use of built-in parameter pack %qD outside of a template",
	     DECL_NAME (decl));
      return false;
    }

  /* If we don't need a value, then we don't need to synthesize DECL.  */
  if (cp_unevaluated_operand || in_discarded_stmt)
    return true;

  /* From this point on, DECL is really odr-used.  */
  DECL_ODR_USED (decl) = 1;
  if (DECL_CLONED_FUNCTION_P (decl))
    DECL_ODR_USED (DECL_CLONED_FUNCTION (decl)) = 1;

  /* DR 757: A type without linkage shall not be used as the type of a
     variable or function with linkage, unless
   o the variable or function has extern "C" linkage (7.5 [dcl.link]), or
   o the variable or function is not used (3.2 [basic.def.odr]) or is
     defined in the same translation unit.  */
  if (cxx_dialect > cxx98
      && decl_linkage (decl) != lk_none
      && !DECL_EXTERN_C_P (decl)
      && !DECL_ARTIFICIAL (decl)
      && !decl_defined_p (decl)
      && no_linkage_check (TREE_TYPE (decl), /*relaxed_p=*/false))
    {
      if (is_local_extern (decl))
	/* There's no way to define a local extern, and adding it to
	   the vector interferes with GC, so give an error now.  */
	no_linkage_error (decl);
      else
	vec_safe_push (no_linkage_decls, decl);
    }

  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DECLARED_INLINE_P (decl)
      && !DECL_INITIAL (decl)
      && !DECL_ARTIFICIAL (decl)
      && !DECL_PURE_VIRTUAL_P (decl))
    /* Remember it, so we can check it was defined.  */
    note_vague_linkage_fn (decl);

  /* Is it a synthesized method that needs to be synthesized?  */
  if (TREE_CODE (decl) == FUNCTION_DECL
      && DECL_DEFAULTED_FN (decl)
      /* A function defaulted outside the class is synthesized either by
	 cp_finish_decl or instantiate_decl.  */
      && !DECL_DEFAULTED_OUTSIDE_CLASS_P (decl)
      && ! DECL_INITIAL (decl))
    {
      /* Defer virtual destructors so that thunks get the right
	 linkage.  */
      if (DECL_VIRTUAL_P (decl) && !at_eof)
	{
	  note_vague_linkage_fn (decl);
	  return true;
	}

      /* Remember the current location for a function we will end up
	 synthesizing.  Then we can inform the user where it was
	 required in the case of error.  */
      if (decl_remember_implicit_trigger_p (decl))
	DECL_SOURCE_LOCATION (decl) = input_location;

      /* Synthesizing an implicitly defined member function will result in
	 garbage collection.  We must treat this situation as if we were
	 within the body of a function so as to avoid collecting live data
	 on the stack (such as overload resolution candidates).

	 We could just let c_parse_final_cleanups handle synthesizing
	 this function by adding it to deferred_fns, but doing
	 it at the use site produces better error messages.  */
      ++function_depth;
      synthesize_method (decl);
      --function_depth;
      /* If this is a synthesized method we don't need to
	 do the instantiation test below.  */
    }
  else if (VAR_OR_FUNCTION_DECL_P (decl)
	   && DECL_TEMPLATE_INFO (decl)
	   && !DECL_DECLARED_CONCEPT_P (decl)
	   && (!DECL_EXPLICIT_INSTANTIATION (decl)
	       || always_instantiate_p (decl)))
    /* If this is a function or variable that is an instance of some
       template, we now know that we will need to actually do the
       instantiation.  We check that DECL is not an explicit
       instantiation because that is not checked in instantiate_decl.

       We put off instantiating functions in order to improve compile
       times.  Maintaining a stack of active functions is expensive,
       and the inliner knows to instantiate any functions it might
       need.  Therefore, we always try to defer instantiation.  */
    {
      /* Bump function_depth for the same GC-safety reason as above.  */
      ++function_depth;
      instantiate_decl (decl, /*defer_ok=*/true,
			/*expl_inst_class_mem_p=*/false);
      --function_depth;
    }

  return true;
}

/* Convenience overload: mark DECL used, emitting the usual
   diagnostics.  */

bool
mark_used (tree decl)
{
  return mark_used (decl, tf_warning_or_error);
}

/* Begin the vtable-verification constructor initialization function;
   it runs at a very high (reserved) initialization priority.  */

tree
vtv_start_verification_constructor_init_function (void)
{
  return start_objects ('I', MAX_RESERVED_INIT_PRIORITY - 1);
}

/* Close FUNCTION_BODY, finish the function, and register it as a
   static constructor at a very high (reserved) priority.  */

tree
vtv_finish_verification_constructor_init_function (tree function_body)
{
  tree fn;

  finish_compound_stmt (function_body);
  fn = finish_function (/*inline_p=*/false);
  DECL_STATIC_CONSTRUCTOR (fn) = 1;
  decl_init_priority_insert (fn, MAX_RESERVED_INIT_PRIORITY - 1);

  return fn;
}

#include "gt-cp-decl2.h"
// ===== Sema.h =====
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> 
ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void 
threadSafetyCleanup(BeforeSet* Cache);
} // namespace threadSafety

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if necessary) the nullability record for \p file.
  /// The returned reference stays valid only until the next lookup for a
  /// different file, which writes the cached entry back into the map.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.  DenseMap::operator[] default-constructs
    // the record if the file has not been seen before.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Return the expected type for the token starting at \p Tok, or a null
  /// QualType if \p Tok is not the location the builder was primed for.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    // Fall back to the lazily-computed type, if one was registered.
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() {
  DelayedDiagnosticsState state;
  state.SavedPool = CurPool;
  CurPool = nullptr;
  return state;
}

/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
  assert(CurPool == nullptr);
  CurPool = state.SavedPool;
}
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  /// Push \p ContextToPush as the current DeclContext, saving the previous
  /// context, its delayed-diagnostics state, and the CXXThisTypeOverride.
  /// If \p NewThisContext is true, the 'this' type override is cleared for
  /// the duration of the push.
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
    : S(S), SavedContext(S.CurContext),
      SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
      SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved state. Idempotent: SavedContext is nulled here and
  /// checked on entry, so the unconditional call from the destructor is a
  /// no-op after an explicit pop().
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};

/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. 
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. 
DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. 
SmallVector<LambdaExpr *, 2> Lambdas;

/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;

/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;

/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
  EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;

ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                  unsigned NumCleanupObjects,
                                  CleanupInfo ParentCleanup,
                                  Decl *ManglingContextDecl,
                                  ExpressionKind ExprContext)
    : Context(Context), ParentCleanup(ParentCleanup),
      NumCleanupObjects(NumCleanupObjects), NumTypos(0),
      ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
      ExprContext(ExprContext) {}

/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? 
NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. 
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. 
enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. 
class FPContractStateRAII {
public:
  // NOTE(review): single-argument ctor is implicit; consider 'explicit' to
  // prevent accidental Sema-to-RAII conversions — confirm no caller relies
  // on the implicit form before changing.
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  /// \param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;   // Sema that will be asked to emit the diagnostic.
  unsigned DiagID; // ID of the diagnostic being built.

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    // Stream through the base class so DiagnosticBuilder's formatting
    // machinery runs, then return the derived reference for chaining.
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? 
nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. 
Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  // Called with the use site and the offending type when the required
  // type turns out to be incomplete.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable(): normalize a value into a form that can be streamed into a
// diagnostic builder. Most overloads are identity; Expr and TypeLoc are
// reduced to their source ranges.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

/// TypeDiagnoser that emits a fixed diagnostic ID with a bound tuple of
/// extra arguments, followed by the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;                // Diagnostic to emit.
  std::tuple<const Ts &...> Args; // Extra diagnostic arguments, held by ref.

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed.
For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

/// Bookkeeping for one module whose parse is currently in progress.
struct ModuleScope {
  SourceLocation BeginLoc;         // Start location of this module scope.
  clang::Module *Module = nullptr; // The module, if known.
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  // Modules visible outside this scope; restored when the scope ends.
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  // Innermost ModuleScope wins; no scope means no current module.
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: non-hidden decls are always visible; otherwise fall back
  // to the slow module-visibility check.
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. 
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). 
QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return 
TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. 
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Template names only exist in C++, and an invalid expression tells us
  // nothing.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausible as a template-name only if the
  // reference does not already carry explicit template arguments.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  // Dependent references: same check, but flag via \p Dependent that the
  // answer cannot be confirmed until instantiation.
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                               SourceLocation FallbackLoc,
                               SourceLocation ConstQualLoc = SourceLocation(),
                               SourceLocation VolatileQualLoc = SourceLocation(),
                               SourceLocation RestrictQualLoc = SourceLocation(),
                               SourceLocation AtomicQualLoc = SourceLocation(),
                               SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is
about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); 
bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void 
SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. 
/// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. 
/// \param ModuleLoc The location of the 'module' keyword.
  DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

  /// The parser has processed a private-module-fragment declaration that
  /// begins the definition of the private module fragment of the current
  /// module unit.
  /// \param ModuleLoc The location of the 'module' keyword.
  /// \param PrivateLoc The location of the 'private' keyword.
  DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                                SourceLocation PrivateLoc);

  /// The parser has processed a module import declaration.
  ///
  /// \param StartLoc The location of the first token in the declaration. This
  ///        could be the location of an '@', 'export', or 'import'.
  /// \param ExportLoc The location of the 'export' keyword, if any.
  /// \param ImportLoc The location of the 'import' keyword.
  /// \param Path The module access path.
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc,
                               SourceLocation ExportLoc,
                               SourceLocation ImportLoc, Module *M,
                               ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                  Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                             SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                              SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is visible,
  /// and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc,
                                            NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   RecordDecl *&AnonRecord);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation,
                                   RecordDecl *&AnonRecord);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  /// Common ways to introduce type names without a tag for use in diagnostics.
  /// Keep in sync with err_tag_reference_non_tag.
  enum NonTagKind {
    NTK_NonStruct,
    NTK_NonClass,
    NTK_NonUnion,
    NTK_NonEnum,
    NTK_Typedef,
    NTK_TypeAlias,
    NTK_Template,
    NTK_TypeAliasTemplate,
    NTK_TemplateTemplateArgument,
  };

  /// Given a non-tag type declaration, returns an enum useful for indicating
  /// what kind of non-tag type this is.
  NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                    bool isDefinition, SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag:  'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration:  'friend struct foo;'
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
                 SourceLocation NameLoc, const ParsedAttributesView &Attr,
                 AccessSpecifier AS, SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
                 bool &IsDependent, SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, bool IsTemplateParamOrArg,
                 SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S,
                                SourceLocation FriendLoc, unsigned TagSpec,
                                SourceLocation TagLoc, CXXScopeSpec &SS,
                                IdentifierInfo *Name, SourceLocation NameLoc,
                                const ParsedAttributesView &Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                               const CXXScopeSpec &SS, IdentifierInfo *Name,
                               SourceLocation TagLoc, SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName,
                 SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);

  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle,
                         AccessSpecifier AS);

  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart, Declarator &D,
                                   Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   const ParsedAttr &MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo,
                            RecordDecl *Record, SourceLocation Loc,
                            bool Mutable, Expr *BitfieldWidth,
                            InClassInitStyle InitStyle,
                            SourceLocation TSSL,
                            AccessSpecifier AS, NamedDecl *PrevDecl,
                            Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

  enum TrivialABIHandling {
    /// The triviality of a method unaffected by "trivial_abi".
    TAH_IgnoreTrivialABI,

    /// The triviality of a method affected by "trivial_abi".
    TAH_ConsiderTrivialABI
  };

  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                  Declarator &D, Expr *BitfieldWidth,
                  tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, const ParsedAttributesView &AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  /// Perform ODR-like check for C/ObjC when merging tag types from modules.
  /// Differently from C++, actually parse the body and reject / error out
  /// in case of a structural mismatch.
  bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                                SkipBodyInfo &SkipBody);

  typedef void *SkippedDefinitionContext;

  /// Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceRange BraceRange);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc,
                                      IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy, bool IsFixed,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          const ParsedAttributesView &Attrs,
                          SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                     Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                     const ParsedAttributesView &Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
  void EnterDeclaratorContext(Scope *S, DeclContext *DC);
  void ExitDeclaratorContext(Scope *S);

  /// Push the parameters of D, which must be a function, into scope.
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed.  If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null.  If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
  /// returns true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in
  ///        the enclosing namespace set of the context, rather than contained
  ///        directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                     bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope.  Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,

    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,

    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,

    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,

    /// The availability attribute was applied using '#pragma clang attribute'.
    AP_PragmaClangAttribute = 1,

    /// The availability attribute for a specific platform was inferred from
    /// an availability attribute for another platform.
    AP_InferredFromOtherPlatform = 2
  };

  /// Attribute merging methods. Return true if a new attribute was added.
  AvailabilityAttr *mergeAvailabilityAttr(
      NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
      VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
      bool IsUnavailable, StringRef Message, bool IsStrict,
      StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
      unsigned AttrSpellingListIndex);
  TypeVisibilityAttr *
  mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                          TypeVisibilityAttr::VisibilityType Vis,
                          unsigned AttrSpellingListIndex);
  VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                      VisibilityAttr::VisibilityType Vis,
                                      unsigned AttrSpellingListIndex);
  UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                          unsigned AttrSpellingListIndex, StringRef Uuid);
  DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                    unsigned AttrSpellingListIndex);
  MSInheritanceAttr *
  mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                         unsigned AttrSpellingListIndex,
                         MSInheritanceAttr::Spelling SemanticSpelling);
  FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                              IdentifierInfo *Format, int FormatIdx,
                              int FirstArg, unsigned AttrSpellingListIndex);
  SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
                                unsigned AttrSpellingListIndex);
  AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                          IdentifierInfo *Ident,
                                          unsigned AttrSpellingListIndex);
  MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                                unsigned AttrSpellingListIndex);
  NoSpeculativeLoadHardeningAttr *
  mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                      const NoSpeculativeLoadHardeningAttr &AL);
  SpeculativeLoadHardeningAttr *
  mergeSpeculativeLoadHardeningAttr(Decl *D,
                                    const SpeculativeLoadHardeningAttr &AL);
  OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                          unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
  InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
                                                const InternalLinkageAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
  CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);

  void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                           AvailabilityMergeKind AMK = AMK_Redeclaration);
  void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
                            LookupResult &OldDecls);
  bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                         bool MergeTypeWithOld);
  bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                    Scope *S, bool MergeTypeWithOld);
  void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
  void MergeVarDecl(VarDecl *New, LookupResult &Previous);
  void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
  void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
  bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
  void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
  bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

  // AssignmentAction - This is used by all the assignment diagnostic functions
  // to represent what is actually causing the operation
  enum AssignmentAction {
    AA_Assigning,
    AA_Passing,
    AA_Returning,
    AA_Converting,
    AA_Initializing,
    AA_Sending,
    AA_Casting,
    AA_Passing_CFAudited
  };

  /// C++ Overloading.
  enum OverloadKind {
    /// This is a legitimate overload: the existing declarations are
    /// functions or function templates with different signatures.
    Ovl_Overload,

    /// This is not an overload because the signature exactly matches
    /// an existing declaration.
    Ovl_Match,

    /// This is not an overload because the lookup results contain a
    /// non-function.
    Ovl_NonFunction
  };
  OverloadKind CheckOverload(Scope *S,
                             FunctionDecl *New,
                             const LookupResult &OldDecls,
                             NamedDecl *&OldDecl,
                             bool IsForUsingDecl);
  bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
                  bool ConsiderCudaAttrs = true);

  ImplicitConversionSequence
  TryImplicitConversion(Expr *From, QualType ToType,
                        bool SuppressUserConversions,
                        bool AllowExplicit,
                        bool InOverloadResolution,
                        bool CStyle,
                        bool AllowObjCWritebackConversion);

  bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
  bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
  bool IsComplexPromotion(QualType FromType, QualType ToType);
  bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                           bool InOverloadResolution,
                           QualType& ConvertedType, bool &IncompatibleObjC);
  bool isObjCPointerConversion(QualType FromType, QualType ToType,
                               QualType& ConvertedType, bool &IncompatibleObjC);
  bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                                 QualType &ConvertedType);
  bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                                QualType& ConvertedType);
  bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                  const FunctionProtoType *NewType,
                                  unsigned *ArgPos = nullptr);
  void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                  QualType FromType, QualType ToType);

  void maybeExtendBlockObject(ExprResult &E);
  CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
  bool CheckPointerConversion(Expr *From, QualType ToType,
                              CastKind &Kind,
                              CXXCastPath& BasePath,
                              bool IgnoreBaseAccess,
                              bool Diagnose = true);
  bool IsMemberPointerConversion(Expr *From, QualType FromType,
                                 QualType ToType, bool InOverloadResolution,
                                 QualType &ConvertedType);
  bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                    CastKind &Kind,
                                    CXXCastPath &BasePath,
                                    bool IgnoreBaseAccess);
  bool IsQualificationConversion(QualType FromType, QualType ToType,
                                 bool CStyle, bool &ObjCLifetimeConversion);
  bool IsFunctionConversion(QualType FromType, QualType ToType,
                            QualType &ResultTy);
  bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
  bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

  ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                             const VarDecl *NRVOCandidate,
                                             QualType ResultType,
                                             Expr *Value,
                                             bool AllowNRVO = true);

  bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                    ExprResult Init);
  ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                       SourceLocation EqualLoc,
                                       ExprResult Init,
                                       bool TopLevelOfInitList = false,
                                       bool AllowExplicit = false);
  ExprResult PerformObjectArgumentInitialization(Expr *From,
                                                 NestedNameSpecifier *Qualifier,
                                                 NamedDecl *FoundDecl,
                                                 CXXMethodDecl *Method);

  /// Check that the lifetime of the initializer (and its subobjects) is
  /// sufficient for initializing the entity, and perform lifetime extension
  /// (when permitted) if not.
  void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);

  ExprResult PerformContextuallyConvertToBool(Expr *From);
  ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

  /// Contexts in which a converted constant expression is required.
  enum CCEKind {
    CCEK_CaseValue,   ///< Expression in a case label.
    CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
    CCEK_TemplateArg, ///< Value of a non-type template parameter.
    CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
    CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
    CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
  };
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              llvm::APSInt &Value, CCEKind CCE);
  ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                              APValue &Value, CCEKind CCE);

  /// Abstract base class used to perform a contextual implicit
  /// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
  public:
    bool Suppress;
    bool SuppressConversion;

    ContextualImplicitConverter(bool Suppress = false,
                                bool SuppressConversion = false)
        : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

    /// Determine whether the specified type is a valid destination type
    /// for this conversion.
    virtual bool match(QualType T) = 0;

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder
    diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// Emits a diagnostic when the expression has incomplete class type.
    virtual SemaDiagnosticBuilder
    diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// Emits a diagnostic when the only matching conversion function
    /// is explicit.
    virtual SemaDiagnosticBuilder diagnoseExplicitConv(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    /// Emits a note for the explicit conversion function.
    virtual SemaDiagnosticBuilder
    noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// Emits a diagnostic when there are multiple possible conversion
    /// functions.
    virtual SemaDiagnosticBuilder
    diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

    /// Emits a note for one of the candidate conversions.
    virtual SemaDiagnosticBuilder
    noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

    /// Emits a diagnostic when we picked a conversion function
    /// (for cases when we are not allowed to pick a conversion function).
    virtual SemaDiagnosticBuilder diagnoseConversion(
        Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

    virtual ~ContextualImplicitConverter() {}
  };

  /// Converter that accepts integral and (possibly scoped) enumeration types,
  /// forwarding the "no match" diagnostic to the pure virtual
  /// \c diagnoseNotInt hook.
  class ICEConvertDiagnoser : public ContextualImplicitConverter {
    bool AllowScopedEnumerations;

  public:
    ICEConvertDiagnoser(bool AllowScopedEnumerations,
                        bool Suppress, bool SuppressConversion)
        : ContextualImplicitConverter(Suppress, SuppressConversion),
          AllowScopedEnumerations(AllowScopedEnumerations) {}

    /// Match an integral or (possibly scoped) enumeration type.
    bool match(QualType T) override;

    SemaDiagnosticBuilder
    diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
      return diagnoseNotInt(S, Loc, T);
    }

    /// Emits a diagnostic complaining that the expression does not have
    /// integral or enumeration type.
    virtual SemaDiagnosticBuilder
    diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
  };

  /// Perform a contextual implicit conversion.
  ExprResult PerformContextualImplicitConversion(
      SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From,
                                           NestedNameSpecifier *Qualifier,
                                           NamedDecl *FoundDecl,
                                           NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                            ArrayRef<Expr *> Args,
                            OverloadCandidateSet &CandidateSet,
                            bool SuppressUserConversions = false,
                            bool PartialOverloading = false,
                            bool AllowExplicit = true,
                            bool AllowExplicitConversion = false,
                            ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                            ConversionSequenceList EarlyConversions = None);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                             ArrayRef<Expr *> Args,
                             OverloadCandidateSet &CandidateSet,
                             TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                             bool SuppressUserConversions = false,
                             bool PartialOverloading = false,
                             bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method,
                          DeclAccessPair FoundDecl,
                          CXXRecordDecl *ActingContext, QualType ObjectType,
                          Expr::Classification ObjectClassification,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          ConversionSequenceList EarlyConversions = None);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                  DeclAccessPair FoundDecl,
                                  CXXRecordDecl *ActingContext,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  QualType ObjectType,
                                  Expr::Classification ObjectClassification,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
  void AddTemplateOverloadCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
      bool PartialOverloading = false, bool AllowExplicit = true,
      ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
  bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
                                    ArrayRef<QualType> ParamTypes,
                                    ArrayRef<Expr *> Args,
                                    OverloadCandidateSet &CandidateSet,
                                    ConversionSequenceList &Conversions,
                                    bool SuppressUserConversions,
                                    CXXRecordDecl *ActingContext = nullptr,
                                    QualType ObjectType = QualType(),
                                    Expr::Classification
                                        ObjectClassification = {});
  void AddConversionCandidate(
      CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
      OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
      bool AllowExplicit, bool AllowResultConversion = true);
  void AddTemplateConversionCandidate(
      FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
      CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
      OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
      bool AllowExplicit, bool AllowResultConversion = true);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                             DeclAccessPair FoundDecl,
                             CXXRecordDecl *ActingContext,
                             const FunctionProtoType *Proto,
                             Expr *Object, ArrayRef<Expr *> Args,
                             OverloadCandidateSet& CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                   SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                   OverloadCandidateSet& CandidateSet,
                                   SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet,
                           bool IsAssignmentOperator = false,
                           unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                    SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                    OverloadCandidateSet& CandidateSet);
  void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                            SourceLocation Loc,
                                            ArrayRef<Expr *> Args,
                                            TemplateArgumentListInfo *ExplicitTemplateArgs,
                                            OverloadCandidateSet& CandidateSet,
                                            bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate
  void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
                             QualType DestType = QualType(),
                             bool TakingAddress = false);

  // Emit as a series of 'note's all template and non-templates identified by
  // the expression Expr
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
                                 bool TakingAddress = false);

  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                              bool MissingImplicitThis = false);

  /// Find the failed Boolean condition within a given Boolean
  /// constant expression, and describe it with a string.
  std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// non-ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-dependent diagnose_if attributes should be checked each time a
  /// function is used as a direct callee of a function call.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
                                           const Expr *ThisArg,
                                           ArrayRef<const Expr *> Args,
                                           SourceLocation Loc);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-independent diagnose_if attributes should be checked on every
  /// use of a function.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                             SourceLocation Loc);

  /// Returns whether the given function's address can be taken or not,
  /// optionally emitting a diagnostic if the address can't be taken.
  ///
  /// Returns false if taking the address of the function is illegal.
  bool checkAddressOfFunctionIsAvailable(
      const FunctionDecl *Function, bool Complain = false,
      SourceLocation Loc = SourceLocation());

  // [PossiblyAFunctionType]  -->   [Return]
  // NonFunctionType --> NonFunctionType
  // R (A) --> R(A)
  // R (*)(A) --> R (A)
  // R (&)(A) --> R (A)
  // R (S::*)(A) --> R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *
  ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                     QualType TargetType,
                                     bool Complain,
                                     DeclAccessPair &Found,
                                     bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *
  resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                              DeclAccessPair &FoundResult);

  bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
      ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

  FunctionDecl *
  ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                              bool Complain = false,
                                              DeclAccessPair *Found = nullptr);

  // NOTE(review): "DoFunctionPointerConverion" below is a long-standing
  // spelling mistake in the parameter name; preserved as-is since renaming a
  // defaulted parameter would churn out-of-view definitions and callers using
  // named arguments in comments.
  bool ResolveAndFixSingleFunctionTemplateSpecialization(
      ExprResult &SrcExpr, bool DoFunctionPointerConverion = false,
      bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
      QualType DestTypeForComplaining = QualType(),
      unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E,
                                       DeclAccessPair FoundDecl,
                                       FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult,
                                            DeclAccessPair FoundDecl,
                                            FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                   ArrayRef<Expr *> Args,
                                   OverloadCandidateSet &CandidateSet,
                                   bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                           SourceLocation RangeLoc,
                                           const DeclarationNameInfo &NameInfo,
                                           LookupResult &MemberLookup,
                                           OverloadCandidateSet *CandidateSet,
                                           Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                     UnresolvedLookupExpr *ULE,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc,
                                     Expr *ExecConfig,
                                     bool AllowTypoCorrection = true,
                                     bool CalleesAddressIsTaken = false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                              MultiExprArg Args, SourceLocation RParenLoc,
                              OverloadCandidateSet *CandidateSet,
                              ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                     UnaryOperatorKind Opc,
                                     const UnresolvedSetImpl &Fns,
                                     Expr *input, bool RequiresADL = true);
  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *LHS,
                                   Expr *RHS, bool RequiresADL = true);
  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                                SourceLocation RLoc,
                                                Expr *Base, Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Args,
                                       SourceLocation RParenLoc);
  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                          SourceLocation LParenLoc,
                                          MultiExprArg Args,
                                          SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                      SourceLocation OpLoc,
                                      bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                           CallExpr *CE, FunctionDecl *FD);

  /// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, 
ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. 
As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol 
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first, if none is found, and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool.
This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp.
public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. 
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;

    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}

    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }

    // Cancel the pop, e.g. when ownership of the scope is transferred.
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  class ConditionResult;

  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);

  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,

    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,

    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
    BFRK_Check
  };

  StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *InitStmt,
                                  Stmt *LoopVar,
                                  SourceLocation ColonLoc, Expr *Collection,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                  SourceLocation CoawaitLoc,
                                  Stmt *InitStmt,
                                  SourceLocation ColonLoc,
                                  Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                  Expr *Cond, Expr *Inc,
                                  Stmt *LoopVarDecl,
                                  SourceLocation RParenLoc,
                                  BuildForRangeKind Kind);
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

  StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
                           SourceLocation LabelLoc,
                           LabelDecl *TheDecl);
  StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                   SourceLocation StarLoc,
                                   Expr *DestExp);
  StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
  StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind, unsigned NumParams);
  typedef std::pair<StringRef, QualType> CapturedParamNameType;
  void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                                CapturedRegionKind Kind,
                                ArrayRef<CapturedParamNameType> Params);
  StmtResult ActOnCapturedRegionEnd(Stmt *S);
  void ActOnCapturedRegionError();
  RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                           SourceLocation Loc,
                                           unsigned NumParams);

  // Bit flags controlling which cases are eligible for copy elision /
  // implicit move when building a return statement.
  enum CopyElisionSemanticsKind {
    CES_Strict = 0,
    CES_AllowParameters = 1,
    CES_AllowDifferentTypes = 2,
    CES_AllowExceptionVariables = 4,
    CES_FormerDefault = (CES_AllowParameters),
    CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
    CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
                         CES_AllowExceptionVariables),
  };

  VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                   CopyElisionSemanticsKind CESK);
  bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                              CopyElisionSemanticsKind CESK);

  StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                             Scope *CurScope);
  StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
  StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);

  StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                             bool IsVolatile, unsigned NumOutputs,
                             unsigned NumInputs, IdentifierInfo **Names,
                             MultiExprArg Constraints, MultiExprArg Exprs,
                             Expr *AsmString, MultiExprArg Clobbers,
                             SourceLocation RParenLoc);

  void FillInlineAsmIdentifierInfo(Expr *Res,
                                   llvm::InlineAsmIdentifierInfo &Info);
  ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                       UnqualifiedId &Id,
                                       bool IsUnevaluatedContext);
  bool LookupInlineAsmField(StringRef Base, StringRef Member,
                            unsigned &Offset, SourceLocation AsmLoc);
  ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                         SourceLocation AsmLoc);
  StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                            ArrayRef<Token> AsmToks,
                            StringRef AsmString,
                            unsigned NumOutputs, unsigned NumInputs,
                            ArrayRef<StringRef> Constraints,
                            ArrayRef<StringRef> Clobbers,
                            ArrayRef<Expr*> Exprs,
                            SourceLocation EndLoc);
  LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                   SourceLocation Location,
                                   bool AlwaysCreate);

  VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
                                  SourceLocation StartLoc,
                                  SourceLocation IdLoc,
                                  IdentifierInfo *Id,
                                  bool Invalid = false);

  Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

  StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                                  SourceLocation RParen,
                                  Decl *Parm, Stmt *Body);

  StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

  StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                                MultiStmtArg Catch, Stmt *Finally);

  StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
  StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                  Scope *CurScope);
  ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                            Expr *operand);
  StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                         Expr *SynchExpr,
                                         Stmt *SynchBody);

  StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

  VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                     SourceLocation StartLoc,
                                     SourceLocation IdLoc,
                                     IdentifierInfo *Id);

  Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

  StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
                                Decl *ExDecl, Stmt *HandlerBlock);
  StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                              ArrayRef<Stmt *> Handlers);

  StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                              SourceLocation TryLoc, Stmt *TryBlock,
                              Stmt *Handler);
  StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
                                 Expr *FilterExpr,
                                 Stmt *Block);
  void ActOnStartSEHFinallyBlock();
  void ActOnAbortSEHFinallyBlock();
  StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
  StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

  void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

  bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

  /// If it's a file scoped decl that must warn if not used, keep track
  /// of it.
  void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

  /// DiagnoseUnusedExprResult - If the statement passed in is an expression
  /// whose result is unused, warn.
  void DiagnoseUnusedExprResult(const Stmt *S);
  void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
  void DiagnoseUnusedDecl(const NamedDecl *ND);

  /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
  /// statement as a \p Body, and it is located on the same line.
  ///
  /// This helps prevent bugs due to typos, such as:
  ///     if (condition);
  ///       do_stuff();
  void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                             const Stmt *Body,
                             unsigned DiagID);

  /// Warn if a for/while loop statement \p S, which is followed by
  /// \p PossibleBody, has a suspicious null statement as a body.
  void DiagnoseEmptyLoopBody(const Stmt *S,
                             const Stmt *PossibleBody);

  /// Warn if a value is moved to itself.
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);

  /// Warn if we're implicitly casting from a _Nullable pointer type to a
  /// _Nonnull one.
  void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                           SourceLocation Loc);

  /// Warn when implicitly casting 0 to nullptr.
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;

  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.
  bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
  // NOTE(review): "ClassReciever" is misspelled (should be "ClassReceiver",
  // cf. DiagnoseAvailabilityOfDecl); the declaration-side parameter name is
  // not interface-affecting in C++, so renaming it here would be safe.
  bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                         const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                         bool ObjCPropertyAccess = false,
                         bool AvoidPartialAvailabilityChecks = false,
                         ObjCInterfaceDecl *ClassReciever = nullptr);
  void NoteDeletedFunction(FunctionDecl *FD);
  void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(
      ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
      ExpressionEvaluationContextRecord::ExpressionKind Type =
          ExpressionEvaluationContextRecord::EK_Other);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);

  // Functions for marking a declaration referenced.  These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense).  There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  //
  // MightBeOdrUse indicates whether the use could possibly be an odr-use, and
  // should usually be true.
  // This only needs to be set to false if the lack of
  // odr-use cannot be determined from the current context (for instance,
  // because the name denotes a virtual function and was written without an
  // explicit nested-name-specifier).
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);

  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);

  void UpdateMarkingForLValueToRValue(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely check whether
  /// the capture can occur without performing the capture itself or complaining
  /// if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. 
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);
  /// Similar, but diagnostic is only produced if all the specified statements
  /// are reachable.
  bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                           const PartialDiagnostic &PD);

  // Primary Expressions.
  SourceRange getExprRange(Expr *E) const;

  ExprResult ActOnIdExpression(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
      CorrectionCandidateCallback *CCC = nullptr,
      bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

  void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                              TemplateArgumentListInfo &Buffer,
                              DeclarationNameInfo &NameInfo,
                              const TemplateArgumentListInfo *&TemplateArgs);

  bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                           CorrectionCandidateCallback &CCC,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           ArrayRef<Expr *> Args = None,
                           TypoExpr **Out = nullptr);

  ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                                IdentifierInfo *II,
                                bool AllowBuiltinCreation=false);

  ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        bool isAddressOfOperand,
                                const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
                              ExprValueKind VK,
                              SourceLocation Loc,
                              const CXXScopeSpec *SS = nullptr);
  ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
                              ExprValueKind VK,
                              const DeclarationNameInfo &NameInfo,
                              const CXXScopeSpec *SS = nullptr,
                              NamedDecl *FoundD = nullptr,
                              const TemplateArgumentListInfo *TemplateArgs = nullptr);
  ExprResult
  BuildAnonymousStructUnionMemberReference(
      const CXXScopeSpec &SS,
      SourceLocation nameLoc,
      IndirectFieldDecl *indirectField,
      DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
      Expr *baseObjectExpr = nullptr,
      SourceLocation opLoc = SourceLocation());

  ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
                                             SourceLocation TemplateKWLoc,
                                             LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                             const Scope *S);
  ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                     bool IsDefiniteInstance,
                                     const Scope *S);
  bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                  const LookupResult &R,
                                  bool HasTrailingLParen);

  ExprResult
  BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                    const DeclarationNameInfo &NameInfo,
                                    bool IsAddressOfOperand, const Scope *S,
                                    TypeSourceInfo **RecoveryTSI = nullptr);

  ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                       SourceLocation TemplateKWLoc,
                                const DeclarationNameInfo &NameInfo,
                                const TemplateArgumentListInfo *TemplateArgs);

  ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
                                      LookupResult &R,
                                      bool NeedsADL,
                                      bool AcceptInvalidDecl = false);
  ExprResult BuildDeclarationNameExpr(
      const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
      NamedDecl *FoundD = nullptr,
      const TemplateArgumentListInfo *TemplateArgs = nullptr,
      bool AcceptInvalidDecl = false);

  ExprResult BuildLiteralOperatorCall(LookupResult &R,
                      DeclarationNameInfo &SuffixInfo,
                      ArrayRef<Expr *> Args,
                      SourceLocation LitEndLoc,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

  ExprResult BuildPredefinedExpr(SourceLocation Loc,
                                 PredefinedExpr::IdentKind IK);
  ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
  ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

  bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

  ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
  ExprResult ActOnCharacterConstant(const Token &Tok,
                                    Scope *UDLScope = nullptr);
  ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
  ExprResult ActOnParenListExpr(SourceLocation L,
                                SourceLocation R,
                                MultiExprArg Val);

  /// ActOnStringLiteral - The specified tokens were lexed as pasted string
  /// fragments (e.g. "foo" "bar" L"baz").
  ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                                Scope *UDLScope = nullptr);

  ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                       SourceLocation DefaultLoc,
                                       SourceLocation RParenLoc,
                                       Expr *ControllingExpr,
                                       ArrayRef<ParsedType> ArgTypes,
                                       ArrayRef<Expr *> ArgExprs);
  ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                        SourceLocation DefaultLoc,
                                        SourceLocation RParenLoc,
                                        Expr *ControllingExpr,
                                        ArrayRef<TypeSourceInfo *> Types,
                                        ArrayRef<Expr *> Exprs);

  // Binary/Unary Operators.  'Tok' is the token for the operator.
  ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                  Expr *InputExpr);
  ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
                          UnaryOperatorKind Opc, Expr *Input);
  ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
                          tok::TokenKind Op, Expr *Input);

  bool isQualifiedMemberAccess(Expr *E);
  QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

  ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                            SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind,
                                            SourceRange R);
  ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                            UnaryExprOrTypeTrait ExprKind);
  ExprResult
    ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                  UnaryExprOrTypeTrait ExprKind,
                                  bool IsType, void *TyOrEx,
                                  SourceRange ArgRange);

  ExprResult CheckPlaceholderExpr(Expr *E);
  bool CheckVecStepExpr(Expr *E);

  bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
  bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
                                        SourceRange ExprRange,
                                        UnaryExprOrTypeTrait ExprKind);
  ExprResult ActOnSizeofParameterPackExpr(Scope *S,
                                          SourceLocation OpLoc,
                                          IdentifierInfo &Name,
                                          SourceLocation NameLoc,
                                          SourceLocation RParenLoc);

  ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                                 tok::TokenKind Kind, Expr *Input);

  ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                     Expr *Idx, SourceLocation RLoc);
  ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                             Expr *Idx, SourceLocation RLoc);
  ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
                                      Expr *LowerBound, SourceLocation ColonLoc,
                                      Expr *Length, SourceLocation RBLoc);

  // This struct is for use by ActOnMemberAccess to allow
  // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
  // changing the access operator from a '.' to a '->' (to see if that is the
  // change needed to fix an error about an unknown member, e.g. when the class
  // defines a custom operator->).
  struct ActOnMemberAccessExtraArgs {
    Scope *S;
    UnqualifiedId &Id;
    Decl *ObjCImpDecl;
  };

  ExprResult BuildMemberReferenceExpr(
      Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
      CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
      const TemplateArgumentListInfo *TemplateArgs,
      const Scope *S,
      ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

  ExprResult
  BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                           bool IsArrow, const CXXScopeSpec &SS,
                           SourceLocation TemplateKWLoc,
                           NamedDecl *FirstQualifierInScope, LookupResult &R,
                           const TemplateArgumentListInfo *TemplateArgs,
                           const Scope *S,
                           bool SuppressQualifierCheck = false,
                           ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

  ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                     SourceLocation OpLoc,
                                     const CXXScopeSpec &SS, FieldDecl *Field,
                                     DeclAccessPair FoundDecl,
                                     const DeclarationNameInfo &MemberNameInfo);

  ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

  bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                     const CXXScopeSpec &SS,
                                     const LookupResult &R);

  ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
                                      bool IsArrow, SourceLocation OpLoc,
                                      const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      NamedDecl *FirstQualifierInScope,
                               const DeclarationNameInfo &NameInfo,
                               const TemplateArgumentListInfo *TemplateArgs);

  ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
                                   SourceLocation OpLoc,
                                   tok::TokenKind OpKind,
                                   CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   UnqualifiedId &Member,
                                   Decl *ObjCImpDecl);

  void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
  bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
                               FunctionDecl *FDecl,
                               const FunctionProtoType *Proto,
                               ArrayRef<Expr *> Args,
                               SourceLocation RParenLoc,
                               bool ExecConfig = false);
  void CheckStaticArrayArgument(SourceLocation CallLoc,
                                ParmVarDecl *Param,
                                const Expr *ArgExpr);

  /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
  /// This provides the location of the left/right parens and a list of comma
  /// locations.
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                   SourceLocation LParenLoc,
                                   ArrayRef<Expr *> Arg,
                                   SourceLocation RParenLoc,
                                   Expr *Config = nullptr,
                                   bool IsExecConfig = false,
                                   ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc,
                                 Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation Loc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);

  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation.  Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
  ExprResult ActOnStmtExprResult(ExprResult E);
  void ActOnStmtExprError();

  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets;  // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };

  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);

  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                             Expr *CondExpr, Expr *LHSExpr,
                             Expr *RHSExpr, SourceLocation RPLoc);

  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);

  // __null
  ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

  bool CheckCaseExpression(Expr *E);

  /// Describes the result of an "if-exists" condition check.
  enum IfExistsResult {
    /// The symbol exists.
    IER_Exists,

    /// The symbol does not exist.
    IER_DoesNotExist,

    /// The name is a dependent name, so the results will differ
    /// from one instantiation to the next.
    IER_Dependent,

    /// An error occurred.
    IER_Error
  };

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                               const DeclarationNameInfo &TargetNameInfo);

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                               bool IsIfExists, CXXScopeSpec &SS,
                               UnqualifiedId &Name);

  StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        NestedNameSpecifierLoc QualifierLoc,
                                        DeclarationNameInfo NameInfo,
                                        Stmt *Nested);
  StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        CXXScopeSpec &SS, UnqualifiedId &Name,
                                        Stmt *Nested);

  //===------------------------- "Block" Extension ------------------------===//

  /// ActOnBlockStart - This callback is invoked when a block literal is
  /// started.
  void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockArguments - This callback allows processing of block arguments.
  /// If there are no arguments, this is still invoked.
  void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                           Scope *CurScope);

  /// ActOnBlockError - If there is an error parsing a block, this callback
  /// is invoked to pop the information about the block from the action impl.
  void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockStmtExpr - This is called when the body of a block statement
  /// literal was successfully completed.  ^(int x){...}
  ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                                Scope *CurScope);

  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison category
  // types stored in ASTContext. The bit-index corresponds to the integer value
  // of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  ///   specified Kind, or a null type if an error occurs
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. 
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
// Third BuildCXXConstructExpr overload: takes both the found declaration and
// an explicit elidability flag (see FIXME above about merging these).
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType, NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor, bool Elidable,
                                 MultiExprArg Exprs, bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit, unsigned ConstructKind,
                                 SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Drop all exception types collected so far.
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // Before C++11 the baseline is an empty dynamic specification, throw().
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. 
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
    bool BuildAndDiagnose = true,
    const unsigned *const FunctionScopeIndexToStopAt = nullptr,
    bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
                               SourceLocation AtLoc, SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                         bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                     SourceLocation LParenOrBraceLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenOrBraceLoc,
                                     bool ListInitialization);

ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Exprs,
                                     SourceLocation RParenLoc,
                                     bool ListInitialization);

/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens, Declarator &D,
                       Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                       SourceLocation PlacementLParen,
                       MultiExprArg PlacementArgs,
                       SourceLocation PlacementRParen,
                       SourceRange TypeIdParens,
                       QualType AllocType,
                       TypeSourceInfo *AllocTypeInfo,
                       Optional<Expr *> ArraySize,
                       SourceRange DirectInitRange,
                       Expr *Initializer);

/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;

/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
                                          SourceLocation Loc);

bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                        SourceRange R);

/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};

/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                             AllocationFunctionScope NewScope,
                             AllocationFunctionScope DeleteScope,
                             QualType AllocType, bool IsArray,
                             bool &PassAlignment, MultiExprArg PlaceArgs,
                             FunctionDecl *&OperatorNew,
                             FunctionDecl *&OperatorDelete,
                             bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                     ArrayRef<QualType> Params);

bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name, FunctionDecl* &Operator,
                              bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                            bool CanProvideSize,
                                            bool Overaligned,
                                            DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
                                                    CXXRecordDecl *RD);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                          bool UseGlobal, bool ArrayForm,
                          Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                          bool IsDelete, bool CallCanBeVirtual,
                          bool WarnOnNonAbstractTypes,
                          SourceLocation DtorLoc);

ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
    LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every initializer form other than copy-init ('=') is treated as
  // direct-initialization by the worker below.
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}

/// Worker for the wrapper above: analyze the init-capture initializer
/// \p Init (updated in place with any implicit conversions applied) and
/// return the type computed for the capture.
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
                                              IdentifierInfo *Id,
                                              bool DirectInit, Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg 
TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

/// The result of a C++ member/base access-control check.
enum AccessResult {
  AR_accessible,   // the entity is accessible
  AR_inaccessible, // the entity is not accessible
  AR_dependent,    // accessibility cannot be determined yet (dependent
                   // context); resolved later
  AR_delayed       // the check itself has been deferred
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                                CXXRecordDecl *DecomposedClass,
                                                DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl,
                        CXXRecordDecl *NamingClass,
                        QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

// Deferred access checks, replayed when more context becomes available
// (presumably at template instantiation / delayed-diagnostic flush time --
// NOTE(review): confirm against the implementation).
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
                const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
                const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Identifies the context in which an abstract type was used, for
/// abstract-type diagnostics; AbstractNone means no specific context.
/// NOTE(review): enumerator order is presumably tied to a diagnostic
/// %select -- confirm before reordering.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);

/// Convenience overload: binds \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};

/// Check \p NewParams against \p OldParams (the parameter list of a prior
/// declaration, if any) in the context described by \p TPC.
/// \returns true if an error occurred, false otherwise -- NOTE(review):
/// inferred from the surrounding Check* convention; confirm in the
/// implementation.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC,
                                SkipBodyInfo *SkipBody = nullptr);

/// Match the template parameter lists written on a declaration against
/// its scope specifier, selecting the list (if any) that parameterizes
/// the declaration itself.
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
    bool &IsMemberSpecialization, bool &Invalid);

/// Semantic check for a class template declaration (or friend class
/// template), parallel to the non-template ActOnTag path.
DeclResult CheckClassTemplate(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
    AccessSpecifier AS, SourceLocation ModulePrivateLoc,
    SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
    TemplateParameterList **OuterTemplateParamLists,
    SkipBodyInfo *SkipBody = nullptr);

/// Wrap \p Arg in a TemplateArgumentLoc carrying trivial (synthesized)
/// source-location information anchored at \p Loc.
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                                  QualType NTTPType,
                                                  SourceLocation Loc);

/// Convert parser-level template arguments (\p In) into the semantic
/// TemplateArgumentListInfo representation (\p Out).
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

/// Emit notes for every declaration found for the template name \p Name.
void NoteAllFoundTemplates(TemplateName Name);

/// Form the type denoted by a template-id, checking the template
/// arguments against the template's parameters.
QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                    TemplateTy Template, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
                    bool IsCtorOrDtorName = false, bool IsClassName = false);

/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( 
ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

/// Check a single template argument \p Arg against the template
/// parameter \p Param of \p Template, appending the converted form to
/// \p Converted; \p CTAK records how the argument was obtained.
bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. 
UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

/// Deduce template arguments for a class template partial specialization
/// from the given (already-checked) template argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a variable template partial specialization
/// from the given (already-checked) template argument list.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. 
/// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. 
bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. 
// FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. 
// Maps a declaration to the diagnostics suppressed while deducing for it.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used solely to select the exception-specification overload
  // below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we have finished instantiating this template.
  void Clear();

  // Popping happens in Clear() so that it can also be invoked early.
  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  // True when constructing this context failed (e.g. depth limit hit);
  // queried via isInvalid().
  bool Invalid;
  // True when the same specialization is already on the context stack;
  // queried via isAlreadyInstantiating().
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common implementation shared by the public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: each object corresponds to exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

/// Print the context stack for the current diagnostic, unless it was
/// already printed at this depth (tracked via
/// LastEmittedCodeSynthesisContextDepth).
void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // Snapshot of the Sema state taken on entry; restored verbatim by the
  // destructor.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  /// Arm the trap. If we are not already in a SFINAE context, marks the
  /// current context as a non-instantiation SFINAE context so diagnostics
  /// are suppressed rather than emitted.
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  /// Disarm the trap, restoring the saved error count, SFINAE-context
  /// flags, and last-diagnostic-ignored state.
  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // Saved so the destructor can restore typo correction to its prior state.
  bool PrevDisableTypoCorrection;

public:
  /// Enter tentative analysis: installs a SFINAE trap (with access
  /// checking enabled) and disables typo correction.
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that, when enabled, temporarily takes ownership of the
/// global pending-instantiation and vtable-use queues. perform() defines
/// the used vtables and runs the pending instantiations gathered inside
/// the scope; the destructor restores the saved queues.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Steal the current queues; they are restored in the destructor.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that takes ownership of the local (function-scope)
/// pending-instantiation queue. perform() runs the local instantiations
/// gathered inside the scope; the destructor restores the saved queue.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Steal the current queue; it is restored in the destructor.
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any non-default ExtParameterInfo has been recorded; when
  // false, getPointerOrNull() returns null to signal "nothing interesting".
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index,
  /// filling any skipped slots before it with default-constructed
  /// (trivial) ExtParameterInfos. Indices must be set in increasing
  /// order.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    // Pad out to the full parameter count with trivial entries.
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            int indexAdjustment,
                            Optional<unsigned> NumExpansions,
                            bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. 
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFEnvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); /// AddModeAttr - Adds a mode attribute to a particular declaration. void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. 
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. 
void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. 
'#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragma omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return DeclareTargetNestingLevel > 0; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return true if (un)supported features for the current target should be /// diagnosed if OpenMP (offloading) is enabled. bool shouldDiagnoseTargetSupportFromOpenMP() const { return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() || isInOpenMPTargetExecutionDirective(); } /// Return the number of captured regions created for an OpenMP directive. static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. 
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. 
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. 
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. 
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
                    DeclarationNameInfo &MapperId,
                    const OMPVarListLocTy &Locs,
                    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
    ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Determine whether \p CCK names one of the explicit cast kinds
/// (C-style, functional, or other cast), as opposed to an implicit
/// conversion or a builtin-overloaded-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. 
The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. 
/// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. 
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK,
    ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType for operands held in
/// ExprResults: unwraps both operands, delegates to the Expr*& overload
/// above, and stores the (possibly updated) operand expressions back into
/// \p E1 and \p E2 before returning the composite type.
QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1,
                                  ExprResult &E2, bool ConvertArgs = true) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                ConvertArgs);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The checked result of parsing a condition (the controlling expression of
/// an 'if', 'while', 'for', 'do', or 'switch'), optionally together with the
/// condition variable declared in it.  For constexpr-if conditions the known
/// compile-time value is captured as well.
class ConditionResult {
  Decl *ConditionVar;      // condition variable, if one was declared
  FullExprArg Condition;   // the (full-expression) condition itself
  bool Invalid;            // true if parsing/checking the condition failed
  bool HasKnownValue;      // true iff a constexpr condition was evaluated
  bool KnownValue;         // the evaluated value; meaningful only if HasKnownValue

  friend class Sema;

  // Only Sema may build a successful result; for constexpr-if it eagerly
  // evaluates the condition (when not value-dependent) so callers can
  // discard the untaken branch.
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }

  /// Returns the condition variable (may be null) and the condition
  /// expression itself.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }

  /// Returns the compile-time value of the condition, or None when it was
  /// not a (non-dependent) constexpr condition.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

/// Convenience factory for an invalid ConditionResult.
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
/// Customization point for the diagnostics emitted when an expression that is
/// required to be an integer constant expression (ICE) is not one, or is only
/// foldable to a constant.  Subclasses override the diagnose* hooks.
class VerifyICEDiagnoser {
public:
  bool Suppress;  // when true, diagnostics are suppressed entirely

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Called when the expression is not an ICE at all.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Called when the expression is not a strict ICE but could be folded to a
  /// constant; has a default implementation.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

private:
  // Nesting depth of the pragma forcing __host__ __device__; see the
  // Push/Pop functions below.
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before incrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns. llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. 
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed.  Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  // Streams Value into whichever diagnostic is active: the immediate one, or
  // the deferred partial diagnostic stored on Fn in DeviceDeferredDiags.
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes.  Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls.  These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  // Anything better than CFP_Never is (at least conditionally) allowed.
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute.  Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. 
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, 
DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation 
getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, 
CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; }

/// Forwards to the current Scope's MS mangling-number counter.
void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

/// Returns OriginalLexicalContext when it is set, otherwise the current
/// semantic context (CurContext).
DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
  /// RAII helper: swaps the pending parsed-class state (delayed exception
  /// spec checks and delayed DLL-export classes) out of the Sema on
  /// construction and swaps it back on destruction.  The destructor asserts
  /// that no new delayed work accumulated in between.
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedOverridingExceptionSpecChecks)
        SavedOverridingExceptionSpecChecks;
    decltype(DelayedEquivalentExceptionSpecChecks)
        SavedEquivalentExceptionSpecChecks;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    // Exchanges Sema's pending lists with the locally saved copies; called
    // symmetrically from the constructor and the destructor.
    void swapSavedState() {
      SavedOverridingExceptionSpecChecks.swap(
          S.DelayedOverridingExceptionSpecChecks);
      SavedEquivalentExceptionSpecChecks.swap(
          S.DelayedEquivalentExceptionSpecChecks);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };

  /// Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    // Entries are identified solely by the expression they refer to.
    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Whether a context was actually pushed (and must be popped in the dtor).
  bool Entered = true;

public:
  /// Push NewContext unless ShouldEnter is false.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  /// Push NewContext, reusing the enclosing lambda context declaration.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };

  /// Variant for braced-init-lists: only enters a context (UnevaluatedList)
  /// when currently inside an unevaluated operand in C++11 or later.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Sentinel keys pair the base FunctionDecl sentinels with a
  // default-constructed (invalid) SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    // Combine the declaration's hash with the raw source-location encoding.
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
quicksort_hybrid.c
/*
 * Recursive hybrid-parallel implementation of Quicksort (not optimized!)
 * This code is to be used in conjunction with exercises in module [B1] Hybrid Algorithm
 *
 * Need OpenMP 5.0 and GCC 11 to compile (recursive offloading)
 *
 * @author: Apan Qasem <apan@txstate.edu>
 * @date: 04/02/20
 *
 * @update: 03/13/21
 */
#include<stdlib.h>
#include<stdio.h>
#include<omp.h>

#define VAL_RANGE 1024
#define ELEMENTS_TO_VERIFY 5

/* exchange the contents of two doubles in place */
void swap(double *x, double *y) {
  double tmp;
  tmp = (*x);
  (*x) = (*y);
  (*y) = tmp;
  return;
}

/*
 * partition array for quicksort
 *   - move pivot to far right
 *   - accumulate values smaller than pivot to the left
 * returns the final (sorted) position of the pivot
 */
int partition(double values[], int left, int right, int pivotIndex) {
  double pivotValue = values[pivotIndex];
  swap(&values[pivotIndex], &values[right]);       // Move pivot to end
  int storeIndex = left;
  for (int i = left; i < right; i++) {
    if (values[i] < pivotValue) {
      swap(&values[i], &values[storeIndex]);
      storeIndex++;
    }
  }
  swap(&values[storeIndex], &values[right]);       // Move pivot to its final place
  return storeIndex;
}

/*
 * recursive quicksort: the left partition is offloaded with a recursive
 * target region (requires OpenMP 5.0 reverse/recursive offload support);
 * the right partition runs as a host task.  Tasks generated inside the
 * single region are guaranteed complete at the implicit barrier that ends
 * the region, so no explicit taskwait is needed here.
 */
void quickSort(double values[], int left, int right) {
#pragma omp parallel
  {
#pragma omp single
    {
      if (left < right) {
        int pivotIndex = (left + right) / 2;
        int pivotNewIndex = partition(values, left, right, pivotIndex);
#pragma omp target
        quickSort(values, left, pivotNewIndex - 1);
#pragma omp task
        quickSort(values, pivotNewIndex + 1, right);
      }
    }
  }
  return;
}

/*
 * display the first N array entries on stdout
 */
void display(double values[], long long N) {
  for (long long i = 0; i < N; i++)   /* long long index: N may exceed INT_MAX */
    fprintf(stdout, "%3.4f ", values[i]);
  fprintf(stdout, "\n");
}

int main(int argc, char *argv[]) {

  if (argc < 3) {
    printf("usage: \n");
    printf(" ./quicksort N threads\n");
    printf(" N = input size\n");
    printf(" threads = number of OpenMP threads\n");
    exit(0);
  }

  /* atoll, not atoi: N is long long and atoi would silently truncate */
  long long N = atoll(argv[1]);
  unsigned threads = (unsigned) atoi(argv[2]);
  omp_set_num_threads(threads);

  double *values = (double *) malloc(sizeof(double) * N);
  if (!values) {
    fprintf(stderr, "could not allocate %lld doubles\n", N);
    exit(EXIT_FAILURE);
  }

  /* fill with pseudo-random values in [0, VAL_RANGE] (rand() is unseeded,
     so runs are reproducible) */
  for (long long i = 0; i < N; i++)
    values[i] = rand() / (double) (RAND_MAX / VAL_RANGE);

  /* quickSort takes int bounds; cast is explicit (N must fit in int) */
  quickSort(values, 0, (int) (N - 1));

  fprintf(stdout, "Sorted values [0..%d]: ", ELEMENTS_TO_VERIFY - 1);
  display(values, ELEMENTS_TO_VERIFY);

  free(values);
  return 0;
}
gm_order.h
#ifndef GM_ORDER_H #define GM_ORDER_H #include <list> #include "gm_internal.h" #include "gm_bitmap.h" template<typename T> class gm_order { public: gm_order(int _max_sz, int _max_thread = 16) : max_thread(_max_thread), max_sz(_max_sz) { local_Q_front = new std::list<T>[max_thread]; local_Q_back = new std::list<T>[max_thread]; bitmap = new unsigned char[(max_sz + 7) / 8]; for (int i = 0; i < (max_sz + 7) / 8; i++) bitmap[i] = 0; } virtual ~gm_order() { delete[] local_Q_front; delete[] local_Q_back; delete[] bitmap; } //------------------------------------------------------------ // API // push_back/front, pop_back/front, clear, get_size // push has separate parallel interface //------------------------------------------------------------ void push_back(T e) // sequential { if (!_gm_get_bit(bitmap, e)) { _gm_set_bit(bitmap, e); Q.push_back(e); } } void push_front(T e) { if (!_gm_get_bit(bitmap, e)) { _gm_set_bit(bitmap, e); Q.push_front(e); } } T pop_back() { T e = Q.back(); _gm_clear_bit(bitmap, e); Q.pop_back(); return e; } T pop_front() { T e = Q.front(); _gm_clear_bit(bitmap, e); Q.pop_front(); return e; } void clear() { Q.clear(); #pragma omp parallel for for (int i = 0; i < (max_sz + 7) / 8; i++) bitmap[i] = 0; } size_t get_size() { return Q.size(); } bool is_in(T e) { return (_gm_get_bit(bitmap, e) == 1); } // for parallel execution void push_back_par(T e, int tid) { if (!_gm_get_bit(bitmap, e)) { // test and atomic if (_gm_set_bit_atomic(bitmap, e)) { local_Q_back[tid].push_back(e); } } } void push_front_par(T e, int tid) { if (!_gm_get_bit(bitmap, e)) { // test and atomic if (_gm_set_bit_atomic(bitmap, e)) { local_Q_back[tid].push_front(e); } } } //------------------------------------------- // called when parallel addition is finished //------------------------------------------- void merge() { for (int i = 0; i < max_thread; i++) { if (local_Q_front[i].size() > 0) Q.splice(Q.begin(), local_Q_front[i]); if (local_Q_back[i].size() > 0) Q.splice(Q.end(), 
local_Q_back[i]); } } // for sequential iteration typename std::list<T>& get_list() { return Q; } //----------------------------------------------- // for iteration //----------------------------------------------- // todo, correctly use nested template def #define ITERATOR_CLASS(CLASS_NAME, LIST_ITER_TYPE) \ class CLASS_NAME {\ public: \ CLASS_NAME(typename LIST_ITER_TYPE I, typename LIST_ITER_TYPE E) \ : ITER(I), END_ITER(E) {} \ inline bool has_next() { \ return (ITER != END_ITER); \ } \ inline T get_next() \ { T t = *ITER; ITER++; return t;} \ private: \ typename LIST_ITER_TYPE ITER; \ typename LIST_ITER_TYPE END_ITER; \ }; ITERATOR_CLASS(seq_iter, std::list<T>::iterator) ;ITERATOR_CLASS(rev_iter, std::list<T>::reverse_iterator) ; #undef ITERATOR_CLASS class par_iter { public: par_iter(typename std::list<T>::iterator I, typename std::list<T>::iterator E) : ITER(I), END_ITER(E), is_small(true), bitmap(NULL) { } par_iter(unsigned char* B, T I, T E) : bitmap(B), ITER(I), END_ITER(E), is_small(false) { } inline bool has_next() { if (is_small) return (ITER != END_ITER); else { while (IDX < END_IDX) { if (_gm_check_bit(bitmap, IDX) == 0) return true; IDX++; } return false; } } inline T get_next() { if (is_small) { T t = *ITER; ITER++; return t; } else { return IDX++; } } private: bool is_small; unsigned char* bitmap; typename std::set<T>::iterator ITER; // for small instance use typename std::set<T>::iterator END_ITER; // for small instance use T IDX; T END_IDX; }; seq_iter prepare_seq_iteration() { seq_iter I(Q.begin(), Q.end()); return I; } rev_iter prepare_rev_iteration() { rev_iter I(Q.rbegin(), Q.rend()); return I; } par_iter prepare_par_iteration(int thread_id, int max_threads) { bool is_small = (Q.size() < THRESHOLD_LARGE); if (is_small) { // for small instance, use single thread if (thread_id == 0) { par_iter I(Q.begin(), Q.end()); return I; } else { par_iter I(Q.end(), Q.end()); return I; } } else { size_t cnt = max_sz / max_threads; T begin = cnt * 
thread_id; T end = (thread_id == (max_threads - 1)) ? max_sz : begin + cnt; par_iter I(bitmap, begin, end); return I; } } private: gm_order() : max_sz(-1), max_thread(-1), bitmap(NULL), local_Q_front(NULL), local_Q_back(NULL) { } // initialize without size is prohibited typename std::list<T> Q; typename std::list<T>* local_Q_front; typename std::list<T>* local_Q_back; int max_thread; int max_sz; unsigned char* bitmap; static const int THRESHOLD_LARGE = 4096; }; typedef gm_order<node_t> gm_node_order; typedef gm_order<edge_t> gm_edge_order; #endif
exm.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1998-2002 Bernard Parent Copyright 2019 Jaehyuk Lee Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* EXM: External Module Functions that can be used with CFDWARP or any other code */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <sys/ioctl.h> #include <unistd.h> #include <stdarg.h> #include "exm.h" void EXM_fatal_error(const char *formatstr, ...){ va_list ap; char *newstr; int term_width,term_height; newstr=(char *)malloc(10000*sizeof(char)); fprintf(stderr,"\n\n"); va_start(ap, formatstr); vsprintf(newstr,formatstr, ap); va_end(ap); find_terminal_window_size(&term_width,&term_height); fprintf(stderr,"%s",strwrp(newstr,min(term_width-1,70))); free(newstr); fprintf(stderr,"\n\nEXM fatal error. 
Exiting.\n\n"); exit(EXIT_FAILURE); } long EXM_ai3(EXM_gl3D_t gl, long i, long j, long k) { long ii; long isr,jsr,jer,ksr,ker; isr=gl.is; jsr=gl.js; jer=gl.je; ksr=gl.ks; ker=gl.ke; ii=(i-isr)*(jer-jsr+1)+(j-jsr); ii=ii*(ker-ksr+1)+(k-ksr); return(ii); } long EXM_ai2(EXM_gl2D_t gl, long i, long j) { long ii; long isr,jsr,jer; isr=gl.is; jsr=gl.js; jer=gl.je; ii=(i-isr)*(jer-jsr+1)+(j-jsr); return(ii); } long EXM_ai1(EXM_gl1D_t gl, long i) { long ii; long isr; isr=gl.is; ii=(i-isr); return(ii); } long EXM_aim(EXM_glm_t gl, long row, long col) { long ii; ii=row*gl.numcol+col; return(ii); } /*=========================================================================== find root solver ============================================================================*/ double SIGN(NUM1, NUM2) double NUM1, NUM2; { double NUM3; double tmp; if (NUM2<0.0e0) tmp=-1.0e0; else tmp=1.0e0; NUM3 = NUM1 * tmp; return NUM3; } /* BASED ON A METHOD BY T J DEKKER WRITTEN BY L F SHAMPINE AND H A WATTS MODIFIED FOR THE MATH LIBRARY BY C B BAILEY TRANSLATED FROM FORTRAN TO PASCAL TO C BY B PARENT ABSTRACT EXM_find_root_zero_in SEARCHES FOR A ZERO OF A FUNCTION F(X) BETWEEN THE GIVEN VALUES B AND C UNTIL THE WIDTH OF THE INTERVAL (B,C) HAS COLLAPSED TO WITHIN A TOLERANCE SPECIFIED BY THE STOPPING CRITERION, ABS(B-C) .LE. 2.*(RW*ABS(B)+AE). THE METHOD USED IS AN EFFICIENT COMBINATION OF BISECTION AND THE SECANT RULE. IN ORDER TO INSURE THAT EXM_find_root_zero_in WILL CONVERGE TO A ZERO, THE USER SHOULD PICK VALUES FOR B AND C AT WHICH THE FUNCTION DIFFERS IN SIGN. DESCRIPTION OF ARGUMENTS F,B,C,RE AND AE ARE INPUT PARAMETERS B,C AND IFLAG ARE OUTPUT PARAMETERS F - NAME OF THE REAL VALUED EXTERNAL FUNCTION. THIS NAME MUST BE IN AN EXTERNAL STATEMENT IN THE CALLING PROGRAM. F MUST BE A FUNCTION OF ONE REAL ARGUMENT. minval- ONE END OF THE INTERVAL (B,C). THE VALUE RETURNED FOR B USUALLY IS THE BETTER APPROXIMATION TO A ZERO OF F. 
maxval- THE OTHER END OF THE INTERVAL (B,C) relerr- RELATIVE ERROR USED FOR RW IN THE STOPPING CRITERION. IF THE REQUESTED RE IS LESS THAN MACHINE PRECISION, THEN RW IS SET TO APPROXIMATELY MACHINE PRECISION. abserr- ABSOLUTE ERROR USED IN THE STOPPING CRITERION. IF THE GIVEN INTERVAL (B,C) CONTAINS THE ORIGIN, THEN A NONZERO VALUE SHOULD BE CHOSEN FOR AE. IFLAG - A STATUS CODE. USER MUST CHECK IFLAG AFTER EACH CALL. CONTROL RETURNS TO THE USER FROM EXM_find_root_zero_in IN ALL CASES. XERROR DOES NOT PROCESS DIAGNOSTICS IN THESE CASES. 1 B IS WITHIN THE REQUESTED TOLERANCE OF A ZERO. THE INTERVAL (B,C) COLLAPSED TO THE REQUESTED TOLERANCE, THE FUNCTION CHANGES SIGN IN (B,C), AND F(X) DECREASED IN MAGNITUDE AS (B,C) COLLAPSED. 2 F(B) = 0. HOWEVER, THE INTERVAL (B,C) MAY NOT HAVE COLLAPSED TO THE REQUESTED TOLERANCE. 3 B MAY BE NEAR A SINGULAR POINT OF F(X). THE INTERVAL (B,C) COLLAPSED TO THE REQUESTED TOLERANCE AND THE FUNCTION CHANGES SIGN IN (B,C) BUT F(X) INCREASED IN MAGNITUDE AS (B,C) COLLAPSED,I.E. ABS(F(B OUT)) .GT. MAX(ABS(F(B IN)),ABS(F(C IN))) 4 NO CHANGE IN SIGN OF F(X) WAS FOUND ALTHOUGH THE INTERVAL (B,C) COLLAPSED TO THE REQUESTED TOLERANCE. THE USER MUST EXAMINE THIS CASE AND DECIDE WHETHER B IS NEAR A LOCAL MINIMUM OF F(X), OR B IS NEAR A ZERO OF EVEN MULTIPLICITY, OR NEITHER OF THESE. 5 TOO MANY (.GT. 500) FUNCTION EVALUATIONS USED. REFERENCES 1. L F SHAMPINE AND H A WATTS, EXM_find_root_zero_in, A ROOT-SOLVING CODE, SC-TM-70-631, SEPT 1970. 2. T J DEKKER, FINDING A ZERO BY MEANS OF SUCCESSIVE LINEAR INTERPOLATION, *CONSTRUCTIVE ASPECTS OF THE FUNDAMENTAL THEOREM OF ALGEBRA*, EDITED BY B DEJON AND P HENRICI, 1969. 
ER IS TWO TIMES THE COMPUTER UNIT ROUNDOFF VALUE WHICH IS
DEFINED HERE TO BE THE VALUE FOR THE IBM PC DOUBLE PRECISION*/
/* Dekker zeroin: bracketed root search on [minval,maxval] combining
   bisection with the secant rule; see the ABSTRACT comment above for
   the meaning of the arguments and of *IFLAG (1..5). */
double EXM_find_root_zero_in(double(*FUNCT)(void *, double), void *arg_ptr,
                             double minval, double maxval,
                             double relerr, double abserr, long *IFLAG){
  double T, A, P, U, TOL, ACMB, CMB, FX, FC, FB, FA, ACBS, ER, RW, AW;
  long IC, KOUNT;
  *IFLAG = 0;
  ER = 2.0e-13;               /* twice the unit roundoff (IBM PC double) */
  RW = max(relerr, ER);       /* relative weight, not below machine precision */
  AW = max(abserr, 0.0e0);
  IC = 0;
  ACBS = fabs(minval - maxval);
  A = maxval;
  T = A;
  FA = (*FUNCT)(arg_ptr,T);
  T = minval;
  FB = (*FUNCT)(arg_ptr,T);
  FC = FA;
  KOUNT = 2;                  /* two evaluations done so far */
  FX = max(fabs(FB), fabs(FC));
/* main loop: keep (minval,maxval) a sign-change bracket and shrink it */
_L1:
  if (fabs(FC) > fabs(FB)) goto _L2;
  /* swap ends so that minval holds the smaller residual */
  A = minval;
  FA = FB;
  minval = maxval;
  FB = FC;
  maxval = A;
  FC = FA;
_L2:
  if (FB == 0.0e0) *IFLAG = 2;          /* exact zero found */
  CMB = 0.5e0 * (maxval - minval);
  ACMB = fabs(CMB);
  TOL = RW * fabs(minval) + AW;
  if (ACMB <= TOL) {
    /* interval collapsed: classify the outcome */
    *IFLAG = 1;
    if (SIGN(1.0e0, FB) == SIGN(1.0e0, FC)) *IFLAG = 4;  /* no sign change */
    if (fabs(FB) > FX) *IFLAG = 3;                       /* residual grew */
  }
  /* secant step numerator/denominator, normalized to P >= 0 */
  P = (minval - A) * FB;
  U = FA - FB;
  if (P >= 0.0e0) goto _L3;
  P = -P;
  U = -U;
_L3:
  A = minval;
  FA = FB;
  IC++;
  if (IC < 4) goto _L4;
  if (8.0e0 * ACMB >= ACBS) goto _L6;   /* too slow: force bisection */
  IC = 0;
  ACBS = ACMB;
_L4:
  if (P > fabs(U) * TOL) goto _L5;
  minval += SIGN(TOL, CMB);             /* step smaller than TOL: take TOL */
  goto _L7;
_L5:
  if (P >= CMB * U) goto _L6;           /* secant step leaves bracket: bisect */
  assert(U!=0.0e0);
  minval += P / U;                      /* accept the secant step */
  goto _L7;
_L6:
  minval = 0.5e0 * (maxval + minval);   /* bisection step */
_L7:
  T = minval;
  FB = (*FUNCT)(arg_ptr,T);
  if (FB == 0.0e0) *IFLAG = 2;
  if (SIGN(1.0e0, FB) != SIGN(1.0e0, FC)) goto _L8;
  maxval = A;                            /* restore the sign-change bracket */
  FC = FA;
_L8:
  KOUNT++;
  if (KOUNT > 500) *IFLAG = 5;           /* too many function evaluations */
  if (*IFLAG == 0) goto _L1;
  return(minval);
}

/* root_guess: enter a value as a first guess of the root
   droot_init: a small value, usually set to about 10^8 times smaller than the
               maximum value of a root you would expect to obtain
   relerr:     the maximum admissible relative error on the residual to obtain
               convergence
   abserr:     the maximum admissible absolute error on the residual to obtain
               convergence
   *IFLAG= 1:  convergence has been obtained correctly;
           2:  problem in convergence, too many
iterations
           3:  droot_init can not be zero
           4:  problem finding adequate dx from droot_init (droot_init may be
               too small or too large)
           5:  the function provided returned NaN or a non finite number */
/* Secant-style Newton-Raphson: the derivative is approximated by finite
   differences starting from step droot_init; droot_init is first enlarged
   until the residual difference is non-zero. */
double EXM_find_root_Newton_Raphson( double(*FUNCT)(void *, double), void *arg_ptr,
                                     double root_guess, double droot_init,
                                     double relerr, double abserr, long *IFLAG){
  double x1,x2,dx;
  double res1,res2,resref;
  long cnt;
  bool PROBLEM,NEEDEDADJUSTMENT;
  x1=0.0;
  *IFLAG=0;
  if (droot_init==0.0) *IFLAG=3;
  /* check if droot_init is proper and adjust it if required */
  PROBLEM=FALSE;
  NEEDEDADJUSTMENT=FALSE;
  res1=(*FUNCT)(arg_ptr,x1);
  cnt=0;
  do {
    res2=(*FUNCT)(arg_ptr,x1+droot_init);
    if (res2-res1==0.0) {
      /* flat residual: grow the step until a difference appears */
      PROBLEM=TRUE;
      NEEDEDADJUSTMENT=TRUE;
      droot_init*=10.0;
    } else {
      PROBLEM=FALSE;
    }
    cnt++;
  } while (PROBLEM && cnt<=100);
  if (NEEDEDADJUSTMENT) droot_init*=100.0;   /* extra safety margin */
  if (cnt>=100) *IFLAG=4;
  /* reference residual magnitude used for the relative-error test below */
  resref=max(fabs(res2),max(fabs(res1),fabs((*FUNCT)(arg_ptr,x1+droot_init))));
  if (isnan(resref) || !isfinite(resref)) *IFLAG=5;
  if (*IFLAG==0){
    PROBLEM=FALSE;
    x1=root_guess;
    res1=(*FUNCT)(arg_ptr,x1);
    dx=droot_init;
    cnt=0;
    do {
      cnt++;
      x2=x1+dx;
      res2=(*FUNCT)(arg_ptr,x2);
      if (res2-res1==0.0 || x2-x1==0.0){
        /* degenerate secant: enlarge the probing step and retry */
        droot_init*=2.0;
        dx=droot_init;
        PROBLEM=TRUE;
      } else {
        PROBLEM=FALSE;
        dx=-res2/(res2-res1)*(x2-x1);   /* secant update */
      }
      res1=res2;
      x1=x2;
    } while ((fabs(res1)>=abserr || PROBLEM)
          && (fabs(res1/resref)>=relerr || PROBLEM) && (cnt<=300));
    *IFLAG=1;
    if (cnt>=300) *IFLAG=2;
    if (isnan(res1) || !isfinite(res1)) *IFLAG=5;
  }
  return(x1);
}

/*============================================================================
  XDMA solver
=============================================================================*/

/* pthread-based variant, kept for reference but disabled:
#define maxXDMAthread 5

typedef struct {
  EXM_gl2D_t *gl;
  long hbw,line,line2min,line2max;
  double *xdma;
} XDMAthreadptr_t;

void *XDMAthreadfunct(void *XDMAthreadptr){
  EXM_gl2D_t *gl;
  long col,hbw,line,line2min,line2max,line2,dc;
  double fact;
  double *xdma;
  xdma=((XDMAthreadptr_t *)XDMAthreadptr)->xdma;
  gl=((XDMAthreadptr_t
*)XDMAthreadptr)->gl; hbw=((XDMAthreadptr_t *)XDMAthreadptr)->hbw; line=((XDMAthreadptr_t *)XDMAthreadptr)->line; line2min=((XDMAthreadptr_t *)XDMAthreadptr)->line2min; line2max=((XDMAthreadptr_t *)XDMAthreadptr)->line2max; for (line2=line2min; line2<=line2max; line2++){ dc=(line2-line); // here the idea is to add line*fact to line2, with dc added to the // column index of line fact=-xdma[EXM_ai2(*gl,hbw-dc,line2)]/xdma[EXM_ai2(*gl,hbw,line)]; for (col=hbw; col<gl->ie; col++){ xdma[EXM_ai2(*gl,col-dc,line2)]+=fact*xdma[EXM_ai2(*gl,col,line)]; } xdma[EXM_ai2(*gl,gl->ie,line2)]+=fact*xdma[EXM_ai2(*gl,gl->ie,line)]; } return(NULL); } // solves a x-diagonal matrix as defined by the pointer variable xdma with the size // in gl void EXM_SolveXDMAthread(double *xdma, EXM_gl2D_t gl){ long numXDMAthread,cntXDMAthread,hbw,line,line2,dc,linemax,col,line2min,line2max,numline; double fact; void *retval; pthread_t XDMAthread[maxXDMAthread]; XDMAthreadptr_t XDMAthreadptr[maxXDMAthread]; // NOTE: gl.js must be equal to zero // gl.is must be equal to zero hbw=(gl.ie-gl.is+1)/2-1; linemax=gl.je; for (line=0; line<linemax; line++){ line2min=line+1; line2max=min(line+hbw,linemax); cntXDMAthread=0; numline=max(1,round((line2max-line2min)/maxXDMAthread)); do { XDMAthreadptr[cntXDMAthread].gl=&gl; XDMAthreadptr[cntXDMAthread].hbw=hbw; XDMAthreadptr[cntXDMAthread].line=line; XDMAthreadptr[cntXDMAthread].xdma=xdma; XDMAthreadptr[cntXDMAthread].line2min=line2min+cntXDMAthread*numline; XDMAthreadptr[cntXDMAthread].line2max=min(line2max,line2min+(cntXDMAthread+1)*numline-1); if (cntXDMAthread==maxXDMAthread-1) XDMAthreadptr[cntXDMAthread].line2max=line2max; if (line==0 && FALSE) fprintf(stderr,"%ld %ld %ld %ld %ld %ld\n",line2min,line2max,XDMAthreadptr[cntXDMAthread].line2min,XDMAthreadptr[cntXDMAthread].line2max,hbw,numline); if (pthread_create(&((XDMAthread)[cntXDMAthread]), NULL, &XDMAthreadfunct, (void *)(&(XDMAthreadptr[cntXDMAthread])))) fprintf(stderr,"Cannot create XDMA thread.\n"); 
cntXDMAthread++;
    } while (cntXDMAthread<maxXDMAthread || XDMAthreadptr[cntXDMAthread-1].line2max!=line2max);
    numXDMAthread=cntXDMAthread;
    for (cntXDMAthread=0; cntXDMAthread<numXDMAthread; cntXDMAthread++){
      if (pthread_join(XDMAthread[cntXDMAthread],&retval)) fprintf(stderr,"Cannot join XDMA thread %ld.\n",cntXDMAthread);
    }
  }
  for (line=linemax; line>0; line--){
    for (line2=line-1; line2>=max(line-hbw,0); line2--){
      dc=(line2-line);
      // here the idea is to add line*fact to line2, with dc added to the
      // column index of line
      fact=-xdma[EXM_ai2(gl,hbw-dc,line2)]/xdma[EXM_ai2(gl,hbw,line)];
      col=hbw;
      xdma[EXM_ai2(gl,col-dc,line2)]+=fact*xdma[EXM_ai2(gl,col,line)];
      xdma[EXM_ai2(gl,gl.ie,line2)]+=fact*xdma[EXM_ai2(gl,gl.ie,line)];
    }
  }
  for (line=0; line<=linemax; line++){
    xdma[EXM_ai2(gl,gl.ie,line)]/=xdma[EXM_ai2(gl,hbw,line)];
    xdma[EXM_ai2(gl,hbw,line)]=1.0e0;
  }
}
*/

/*solves a x-diagonal matrix as defined by the pointer variable xdma with the size
  in gl*/
/* Banded (half-bandwidth hbw) Gaussian elimination; the rightmost column
   (index gl.ie) holds the RHS.  Inner loops optionally run in parallel
   when compiled with OPENMPTHREADS. */
void EXM_solve_XDMA(double *xdma, EXM_gl2D_t gl){
  long hbw,line,line2,dc,linemax,col;
  double fact,sum,aux;
  /* NOTE: gl.js must be equal to zero
           gl.is must be equal to zero */
  hbw=(gl.ie-gl.is+1)/2-1;     /* half bandwidth; diagonal sits in column hbw */
  linemax=gl.je;
  /* forward elimination: clear the sub-diagonal band */
  for (line=0; line<linemax; line++){
#if defined(OPENMPTHREADS)
#pragma omp parallel for private(line2,dc,fact,col) schedule(static)
#endif
    for (line2=line+1; line2<=min(line+hbw,linemax); line2++){
      dc=(line2-line);
      // here the idea is to add line*fact to line2, with dc added to the
      // column index of line
      assert(xdma[EXM_ai2(gl,hbw,line)]!=0.0);
      fact=-xdma[EXM_ai2(gl,hbw-dc,line2)]/xdma[EXM_ai2(gl,hbw,line)];
      for (col=hbw; col<gl.ie; col++){
        xdma[EXM_ai2(gl,col-dc,line2)]+=fact*xdma[EXM_ai2(gl,col,line)];
      }
      xdma[EXM_ai2(gl,gl.ie,line2)]+=fact*xdma[EXM_ai2(gl,gl.ie,line)];
    }
  }
  /* normalize the last line, then back-substitute upwards */
  xdma[EXM_ai2(gl,gl.ie,linemax)]/=xdma[EXM_ai2(gl,hbw,linemax)];
  xdma[EXM_ai2(gl,hbw,linemax)]=1.0;
  for (line=linemax-1; line>=0; line--){
    sum=0.0;
#if defined(OPENMPTHREADS)
#pragma omp parallel for reduction(+:sum) private(line2,aux) schedule(static)
#endif
    for (line2=line+1; line2<=min(line+hbw,linemax); line2++){
      aux=xdma[EXM_ai2(gl,hbw+line2-line,line)]*xdma[EXM_ai2(gl,gl.ie,line2)];
      sum=sum+aux;
    }
    xdma[EXM_ai2(gl,gl.ie,line)]-=sum;
    assert(xdma[EXM_ai2(gl,hbw,line)]!=0.0);
    xdma[EXM_ai2(gl,gl.ie,line)]/=xdma[EXM_ai2(gl,hbw,line)];
    xdma[EXM_ai2(gl,hbw,line)]=1.0;
  }
}

/* Older, purely sequential variant of EXM_solve_XDMA (downward then upward
   sweep, normalization last); kept for reference/comparison. */
void EXM_solve_XDMA_old(double *xdma, EXM_gl2D_t gl){
  long hbw,line,line2,dc,linemax,col;
  double fact;
  /* NOTE: gl.js must be equal to zero
           gl.is must be equal to zero */
  hbw=(gl.ie-gl.is+1)/2-1;
  linemax=gl.je;
  for (line=0; line<linemax; line++){
    for (line2=line+1; line2<=min(line+hbw,linemax); line2++){
      dc=(line2-line);
      /* here the idea is to add line*fact to line2, with dc added to the
         column index of line */
      fact=-xdma[EXM_ai2(gl,hbw-dc,line2)]/xdma[EXM_ai2(gl,hbw,line)];
      for (col=hbw; col<gl.ie; col++){
        xdma[EXM_ai2(gl,col-dc,line2)]+=fact*xdma[EXM_ai2(gl,col,line)];
      }
      xdma[EXM_ai2(gl,gl.ie,line2)]+=fact*xdma[EXM_ai2(gl,gl.ie,line)];
    }
  }
  for (line=linemax; line>0; line--){
    for (line2=line-1; line2>=max(line-hbw,0); line2--){
      dc=(line2-line);
      /* here the idea is to add line*fact to line2, with dc added to the
         column index of line */
      fact=-xdma[EXM_ai2(gl,hbw-dc,line2)]/xdma[EXM_ai2(gl,hbw,line)];
      col=hbw;
      xdma[EXM_ai2(gl,col-dc,line2)]+=fact*xdma[EXM_ai2(gl,col,line)];
      xdma[EXM_ai2(gl,gl.ie,line2)]+=fact*xdma[EXM_ai2(gl,gl.ie,line)];
    }
  }
  for (line=0; line<=linemax; line++){
    xdma[EXM_ai2(gl,gl.ie,line)]/=xdma[EXM_ai2(gl,hbw,line)];
    xdma[EXM_ai2(gl,hbw,line)]=1.0e0;
  }
}

/*============================================================================
  TDMA solver
=============================================================================*/

/*solves a tri-diagonal matrix as defined by the pointer variables tdm lines*/
/* val[0..2] = sub/main/super diagonal, val[3] = RHS. */
void EXM_solve_TDMA(EXM_tdmaline_t *tdma, long numlines) {
  long line,cnt;
  double tmp;
  /*tdma[0].val[0] must be equal to zero*/
  /*tdma[numlines-1].val[2] must be equal to zero*/
  /* forward sweep: eliminate the sub-diagonal */
  for (line=0; line<numlines-1; line++){
tmp = -(tdma[line+1].val[0] / tdma[line].val[1]); for (cnt = 1; cnt <= 2; cnt++) tdma[line+1].val[cnt - 1] += tdma[line].val[cnt] * tmp; tdma[line+1].val[3] += tdma[line].val[3] * tmp; tdma[line+1].val[0] = 0.0; } for (line=numlines-1; line>0; line--){ tdma[line].val[3] /= tdma[line].val[1]; tdma[line].val[1] = 1.0; tdma[line-1].val[3] -= tdma[line].val[3] * tdma[line-1].val[2]; tdma[line-1].val[2] = 0.0; } tdma[0].val[3] /= tdma[0].val[1]; tdma[0].val[1] = 1.0; } /*============================================================================ PDMA solver =============================================================================*/ /*solves a penta-diagonal matrix as defined by the pointer variables pdm lines*/ void EXM_solve_PDMA(EXM_pdmaline_t *pdma, long numlines) { long line; double tmp; /* first sweep downwards */ for (line=1; line<numlines-1; line++){ tmp = -(pdma[line+1].val[0] / pdma[line].val[1]); pdma[line+1].val[0] = 0.0; pdma[line+1].val[1] += pdma[line].val[2] * tmp; pdma[line+1].val[2] += pdma[line].val[3] * tmp; pdma[line+1].val[3] += pdma[line].val[4] * tmp; pdma[line+1].val[5] += pdma[line].val[5] * tmp; } /* second sweep downwards */ for (line=0; line<numlines-1; line++){ tmp = -(pdma[line+1].val[1] / pdma[line].val[2]); pdma[line+1].val[1] = 0.0; pdma[line+1].val[2] += pdma[line].val[3] * tmp; pdma[line+1].val[3] += pdma[line].val[4] * tmp; pdma[line+1].val[5] += pdma[line].val[5] * tmp; } /* first sweep upwards */ for (line=numlines-2; line>0; line--){ tmp = -(pdma[line-1].val[4] / pdma[line].val[3]); pdma[line-1].val[4] =0.0; pdma[line-1].val[3] += pdma[line].val[2]*tmp ; pdma[line-1].val[2] += pdma[line].val[1]*tmp ; pdma[line-1].val[1] += pdma[line].val[0]*tmp ; pdma[line-1].val[5] += pdma[line].val[5]*tmp; } /* second sweep upwards */ for (line=numlines-1; line>0; line--){ pdma[line].val[5] /= pdma[line].val[2]; pdma[line].val[2] = 1.0; pdma[line-1].val[5] -= pdma[line].val[5] * pdma[line-1].val[3]; pdma[line-1].val[3] = 0.0; } 
pdma[0].val[5] /= pdma[0].val[2];
  pdma[0].val[2] = 1.0;
}


/*======================================================================
  Block TDMA solver
 ======================================================================
*/

/* flat index of element (row,col) of the k-by-k block stored at block index `line` */
long EXM_mi(long k, long line, long row, long col){
  long tmp;
  tmp=line*k*k+col*k+row;
  return(tmp);
}

/* mat3[line3] = mat1[line1] + mat2[line2], all k-by-k blocks */
static void mat_add_tdma(double *mat1, long line1, double *mat2, long line2, double *mat3, long line3, long k){
  long row,col;
  for (row=0; row<k; row++){
    for (col=0; col<k; col++){
      mat3[EXM_mi(k,line3,row,col)]= mat1[EXM_mi(k,line1,row,col)]
                                    +mat2[EXM_mi(k,line2,row,col)];
    }
  }
}

/* mat3[line3] = mat1[line1] * mat2[line2] (k-by-k matrix product) */
static void mat_mult_tdma(double *mat1, long line1, double *mat2,long line2, double *mat3, long line3, long k){
  long row,col,cnt;
  for (row=0; row<k; row++){
    for (col=0; col<k; col++){
      mat3[EXM_mi(k,line3,row,col)]=0.0e0;
      for (cnt=0; cnt<k; cnt++){
        mat3[EXM_mi(k,line3,row,col)]=mat3[EXM_mi(k,line3,row,col)]
          + mat1[EXM_mi(k,line1,row,cnt)]*mat2[EXM_mi(k,line2,cnt,col)];
      }
    }
  }
}

/* set block mat1[line1] to the k-by-k identity */
static void mat_init_i_tdma(double *mat1, long line1, long k){
  long m,n;
  for (m=0; m<k; m++){
    for (n=0; n<k; n++){
      mat1[EXM_mi(k,line1,m,n)]=0.0e0;
    }
  }
  for (m=0; m<k; m++){
    mat1[EXM_mi(k,line1,m,m)]=1.0e0;
  }
}

/* mat2[line2] = inverse of mat1[line1] by Gauss-Jordan elimination;
   mat1[line1] is destroyed (reduced to identity).  The +0.0e-30 guard
   avoids a division by an exactly-zero pivot during elimination. */
static void mat_inv_tdma(double *mat1,long line1,double *mat2,long line2,long k){
  long row,row2,col;
  double multfact;
  /* Idea: init mat2 as identity; gaussian elimination on mat1/mat2 */
  mat_init_i_tdma(mat2,line2,k);
  for (row=0; row<k; row++){
    for (row2=0; row2<k; row2++){
      if (row2 != row) {
        multfact=-mat1[EXM_mi(k,line1,row2,row)]
                 /(mat1[EXM_mi(k,line1,row,row)]+0.0e-30);
        /* Add line row*multfact to line row2 */
        for (col=0; col<k; col++){
          mat1[EXM_mi(k,line1,row2,col)]=mat1[EXM_mi(k,line1,row2,col)]
            + mat1[EXM_mi(k,line1,row,col)]*multfact;
          mat2[EXM_mi(k,line2,row2,col)]=mat2[EXM_mi(k,line2,row2,col)]
            + mat2[EXM_mi(k,line2,row,col)]*multfact;
        }
      }
    }
  }
  /* normalize rows by the (now isolated) diagonal entries */
  for (row=0; row<k; row++){
    for (col=0; col<k; col++){
      assert(mat1[EXM_mi(k,line1,row,row)]!=0.0e0);
      mat2[EXM_mi(k,line2,row,col)]=mat2[EXM_mi(k,line2,row,col)]/(mat1[EXM_mi(k,line1,row,row)]);
    }
    mat1[EXM_mi(k,line1,row,row)]=1.e0;
  }
}

/* copy block mat1[line1] into mat2[line2] */
static void mat_equal_tdma(double *mat1,long line1,double *mat2,long line2, long k){
  long row,col;
  for (row=0; row<k; row++){
    for (col=0; col<k; col++){
      mat2[EXM_mi(k,line2,row,col)]=mat1[EXM_mi(k,line1,row,col)];
    }
  }
}

/* mat3[line3] = -mat2[line2] * inverse(mat1[line1]);
   uses mat3 blocks 4 and 5 as scratch space, so mat3 must hold
   at least 6 blocks (the callers allocate 6*k*k). */
static void find_multfact(double *mat1,long line1,double *mat2,long line2, double *mat3,long line3, long k){
  long row,col;
  /* Idea: mat3*mat1=-mat2 or: mat3=-mat2*mat1_inv */
  mat_equal_tdma(mat1,line1,mat3,5,k);
  mat_inv_tdma(mat3,5,mat3,4,k);
  mat_mult_tdma(mat2,line2,mat3,4,mat3,line3,k);
  for (row=0; row<k; row++){
    for (col=0; col<k; col++){
      mat3[EXM_mi(k,line3,row,col)]=-mat3[EXM_mi(k,line3,row,col)];
    }
  }
}

/* solve block TDMA with first line at line=0 and last line at line=linemax */
/* AA/BB/CC are the sub/main/super block diagonals; RHS holds the k-by-k
   right-hand-side blocks and receives the solution.  AA, BB, CC are
   destroyed in the process. */
void EXM_solve_block_TDMA(double *AA, double *BB, double *CC, double *RHS, long linemax, long k){
  long line;
  double *TMP;
  TMP=(double *) malloc(6*k*k*sizeof(double));
  /* --------------------------------------------------------------
     Sweep Downward
     -------------------------------------------------------------- */
  for (line=0; line<linemax; line++){
    find_multfact(BB,line,AA,line+1,TMP,1,k);
    mat_mult_tdma(TMP,1,CC,line,TMP,2,k);
    mat_add_tdma(TMP,2,BB,line+1,TMP,3,k);
    mat_equal_tdma(TMP,3,BB,line+1,k);
    mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
    mat_add_tdma(TMP,2,RHS,line+1,TMP,3,k);
    mat_equal_tdma(TMP,3,RHS,line+1,k);
  }
  /* --------------------------------------------------------------
     Sweep Upward
     -------------------------------------------------------------- */
  for (line=linemax; line>0; line--){
    find_multfact(BB,line,CC,line-1,TMP,1,k);
    mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
    mat_add_tdma(TMP,2,RHS,line-1,TMP,3,k);
    mat_equal_tdma(TMP,3,RHS,line-1,k);
  }
  /* --------------------------------------------------------------
     Make BB identity
     -------------------------------------------------------------- */
  for (line=0; line<=linemax; line++){
mat_inv_tdma(BB,line,TMP,1,k);
    mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
    mat_equal_tdma(TMP,2,RHS,line,k);
  }
  free(TMP);
}


/* Debug helper: copy the system, solve it with EXM_solve_block_TDMA, then
   verify A[line]*DD[line-1]+B[line]*DD[line]+C[line]*DD[line+1] against the
   original D[line] (first column only, tolerance 1e-10), printing mismatches. */
void EXM_solve_block_TDMA_and_check(double *AA, double *BB, double *CC, double *DD, long linemax, long k){
  long line,cnt;
  double *A,*B,*C,*D;
  double *TMP;
  bool PROBLEM;
  TMP=(double *) malloc(20*k*k*sizeof(double));
  A=(double *) malloc((linemax+2)*k*k*sizeof(double));
  B=(double *) malloc((linemax+2)*k*k*sizeof(double));
  C=(double *) malloc((linemax+2)*k*k*sizeof(double));
  D=(double *) malloc((linemax+2)*k*k*sizeof(double));
  /* keep pristine copies; the solver destroys its inputs */
  for (line=0; line<=linemax; line++){
    mat_equal_tdma(AA,line,A,line,k);
    mat_equal_tdma(BB,line,B,line,k);
    mat_equal_tdma(CC,line,C,line,k);
    mat_equal_tdma(DD,line,D,line,k);
  }
  EXM_solve_block_TDMA(AA, BB, CC, DD, linemax, k);
  /* now, compare A[line]*DD[line-1]+B[line]*DD[line]+C[line]*DD[line+1] and D[line] */
  for (line=0; line<=linemax; line++){
    mat_mult_tdma(B,line,DD,line,TMP,10,k);
    if (line!=linemax) {
      mat_mult_tdma(C,line,DD,line+1,TMP,11,k);
      mat_add_tdma(TMP,10,TMP,11,TMP,12,k);
      mat_equal_tdma(TMP,12,TMP,10,k);
    }
    if (line!=0) {
      mat_mult_tdma(A,line,DD,line-1,TMP,11,k);
      mat_add_tdma(TMP,10,TMP,11,TMP,12,k);
      mat_equal_tdma(TMP,12,TMP,10,k);
    }
    PROBLEM=FALSE;
    for (cnt=0; cnt<k; cnt++)
      if (fabs(TMP[EXM_mi(k,10,cnt,0)]-D[EXM_mi(k,line,cnt,0)])>1.0e-10) PROBLEM=TRUE;
    if (PROBLEM) {
      for (cnt=0; cnt<k; cnt++) printf("%E ",TMP[EXM_mi(k,10,cnt,0)]);
      printf("\n");
      for (cnt=0; cnt<k; cnt++) printf("%E ",D[EXM_mi(k,line,cnt,0)]);
      printf("\n\n");
    }
  }
  free(A);
  free(B);
  free(C);
  free(D);
  free(TMP);
}


/* solve block PDMA with first line at line=0 and last line at line=linemax */
/* AA..EE are the five block diagonals (AA/BB sub, CC main, DD/EE super);
   RHS receives the solution.  Inputs are destroyed during elimination. */
void EXM_solve_block_PDMA(double *AA, double *BB, double *CC, double *DD, double *EE, double *RHS, long linemax, long k){
  long line;
  double *TMP;
  TMP=(double *) malloc(6*k*k*sizeof(double));
  /* --------------------------------------------------------------
     Sweep Downward
     -------------------------------------------------------------- */
  for (line=1; line<linemax; line++){
    /* ------------First line Downwards------------------------ */
    find_multfact(CC,line,BB,line+1,TMP,1,k);
    mat_mult_tdma(TMP,1,DD,line,TMP,2,k);
    mat_add_tdma(TMP,2,CC,line+1,TMP,3,k);
    mat_equal_tdma(TMP,3,CC,line+1,k);
    mat_mult_tdma(TMP,1,EE,line,TMP,2,k);
    mat_add_tdma(TMP,2,DD,line+1,TMP,3,k);
    mat_equal_tdma(TMP,3,DD,line+1,k);
    mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
    mat_add_tdma(TMP,2,RHS,line+1,TMP,3,k);
    mat_equal_tdma(TMP,3,RHS,line+1,k);
    /* ------------Second Line Downwards----------------------- */
    if (line != linemax-1) {
      find_multfact(CC,line,AA,line+2,TMP,1,k);
      mat_mult_tdma(TMP,1,DD,line,TMP,2,k);
      mat_add_tdma(TMP,2,BB,line+2,TMP,3,k);
      mat_equal_tdma(TMP,3,BB,line+2,k);
      mat_mult_tdma(TMP,1,EE,line,TMP,2,k);
      mat_add_tdma(TMP,2,CC,line+2,TMP,3,k);
      mat_equal_tdma(TMP,3,CC,line+2,k);
      mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
      mat_add_tdma(TMP,2,RHS,line+2,TMP,3,k);
      mat_equal_tdma(TMP,3,RHS,line+2,k);
    }
  }
  /* --------------------------------------------------------------
     Sweep Upward
     -------------------------------------------------------------- */
  for (line=linemax; line>0; line--){
    /* -------First Line upwards------------------------------ */
    find_multfact(CC,line,DD,line-1,TMP,1,k);
    mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
    mat_add_tdma(TMP,2,RHS,line-1,TMP,3,k);
    mat_equal_tdma(TMP,3,RHS,line-1,k);
    /* -------Second Line Upwards----------------------------- */
    if (line != 1) {
      find_multfact(CC,line,EE,line-2,TMP,1,k);
      mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
      mat_add_tdma(TMP,2,RHS,line-2,TMP,3,k);
      mat_equal_tdma(TMP,3,RHS,line-2,k);
    }
  }
  /* --------------------------------------------------------------
     Make CC identity
     -------------------------------------------------------------- */
  for (line=0; line<=linemax; line++){
    mat_inv_tdma(CC,line,TMP,1,k);
    mat_mult_tdma(TMP,1,RHS,line,TMP,2,k);
    mat_equal_tdma(TMP,2,RHS,line,k);
  }
  free(TMP);
}


/* Other Math subroutines of interes*/

/* returns -1.0 for negative x, +1.0 otherwise (including x==0) */
double sign_old(double x){
  double tmp;
  if (x<0) {
    tmp=-1.0e0;
  } else {
    tmp=1.0e0;
  }
return(tmp); } long mod_old(long numb, long div){ long tmp; long tmp2; long tmp3; tmp=numb/div; tmp2=tmp*div; tmp3=numb-tmp2; return (tmp3); } long mod_old2 (long a, long b) { if(b < 0) //you can check for b == 0 separately and do what you want return mod(-a, -b); long ret = a % b; if(ret < 0) ret+=b; return ret; } double powint(double x, long y){ double sum; long cnt; sum=1.0e0; for (cnt=1; cnt<=labs(y); cnt++){ sum=sum*x; } return(sum); } double krodelta(long i, long j){ double tmp; if (i==j) { tmp=1.0e0; } else { tmp=0.0e0; } return(tmp); } void EXM_init_matrix(EXM_mat_t *mat, long numrow, long numcol){ long row,col; mat->cont=(double *)malloc(numrow*numcol*sizeof(double)); mat->glm.numcol=numcol; mat->glm.numrow=numrow; for (row=0; row<numrow; row++){ for (col=0; col<numcol; col++){ mat->cont[EXM_aim(mat->glm,row,col)]=0.0e0; } } } void EXM_reinit_matrix(EXM_mat_t *mat, long numrow, long numcol){ long row,col; mat->cont=(double *)realloc(mat->cont,numrow*numcol*sizeof(double)); mat->glm.numcol=numcol; mat->glm.numrow=numrow; for (row=0; row<numrow; row++){ for (col=0; col<numcol; col++){ mat->cont[EXM_aim(mat->glm,row,col)]=0.0e0; } } } void EXM_free_matrix(EXM_mat_t *mat){ free(mat->cont); } void EXM_init_identity_matrix(EXM_mat_t *mat, long numrow, long numcol){ long row,col; mat->cont=(double *)malloc(numrow*numcol*sizeof(double)); mat->glm.numcol=numcol; mat->glm.numrow=numrow; for (row=0; row<numrow; row++){ for (col=0; col<numcol; col++){ if (row==col) { mat->cont[EXM_aim(mat->glm,row,col)]=1.0e0; } else { mat->cont[EXM_aim(mat->glm,row,col)]=0.0e0; } } } } void EXM_display_matrix(EXM_mat_t mat){ long row,col; printf("numrow=%ld numcol=%ld\n",mat.glm.numrow,mat.glm.numcol); for (row=0; row<mat.glm.numrow; row++){ for (col=0; col<mat.glm.numcol; col++){ printf("%E ",mat.cont[EXM_aim(mat.glm,row,col)]); } printf("\n"); } printf("\n"); } void EXM_multiply_matrices(EXM_mat_t mat1, EXM_mat_t mat2, EXM_mat_t *matr){ long cnt,numrow,numcol,row,col; 
numrow=mat1.glm.numrow;
  numcol=mat2.glm.numcol;
  EXM_reinit_matrix(matr,numrow,numcol);
  if (mat2.glm.numrow==mat1.glm.numcol) {
    for (row=0; row<numrow; row++){
      for (col=0; col<numcol; col++){
        matr->cont[EXM_aim(matr->glm,row,col)]=0.0e0;
        for (cnt=0; cnt<mat1.glm.numcol; cnt++){
          matr->cont[EXM_aim(matr->glm,row,col)]=matr->cont[EXM_aim(matr->glm,row,col)]
            +mat1.cont[EXM_aim(mat1.glm,row,cnt)]
            *mat2.cont[EXM_aim(mat2.glm,cnt,col)];
        }
      }
    }
  } else {
    printf("cannot multiply the two matrices\n");
  }
}

/* matinv = inverse(mat) by plain Gauss-Jordan elimination without pivoting;
   prints a warning (but keeps going) when a zero pivot is met */
void EXM_invert_matrix_gaussian_elimination(EXM_mat_t mat, EXM_mat_t *matinv){
  long row,col,row2;
  double fact;
  EXM_mat_t mattmp;
  EXM_init_matrix(&mattmp,mat.glm.numrow,mat.glm.numcol);
  if (mat.glm.numrow==mat.glm.numcol) {
    EXM_reinit_matrix(matinv,mat.glm.numrow,mat.glm.numcol);
    /* copy mat into the work matrix, start matinv as identity */
    for (row=0; row<mat.glm.numrow; row++){
      for (col=0; col<mat.glm.numcol; col++){
        mattmp.cont[EXM_aim(mat.glm,row,col)]=mat.cont[EXM_aim(mat.glm,row,col)];
        matinv->cont[EXM_aim(mat.glm,row,col)]=0.0;
      }
      matinv->cont[EXM_aim(mat.glm,row,row)]=1.0;
    }
    // make the non-diagonal elements zero for mattmp
    for (row=0; row<mat.glm.numrow; row++){
      for (row2=0; row2<mat.glm.numrow; row2++){
        if (row2!=row) {
          if (mattmp.cont[EXM_aim(mat.glm,row,row)]==0.0){
            printf("matrix cannot be inverted\n");
          }
          fact=-mattmp.cont[EXM_aim(mat.glm,row2,row)]/mattmp.cont[EXM_aim(mat.glm,row,row)];
          for (col=0; col<mat.glm.numcol; col++){
            mattmp.cont[EXM_aim(mat.glm,row2,col)]+=fact*mattmp.cont[EXM_aim(mat.glm,row,col)];
            matinv->cont[EXM_aim(mat.glm,row2,col)]+=fact*matinv->cont[EXM_aim(mat.glm,row,col)];
          }
        }
      }
    }
    // EXM_display_matrix(mattmp);
    // make the diagonal elements equal to 1 for mattmp
    for (row=0; row<mat.glm.numrow; row++){
      if (mattmp.cont[EXM_aim(mat.glm,row,row)]==0.0){
        printf("matrix cannot be inverted\n");
      }
      fact=1.0/mattmp.cont[EXM_aim(mat.glm,row,row)];
      for (col=0; col<mat.glm.numcol; col++){
        mattmp.cont[EXM_aim(mat.glm,row,col)]*=fact;
        matinv->cont[EXM_aim(mat.glm,row,col)]*=fact;
      }
    }
  } else {
    printf("matrix cannot be inverted\n");
    printf("number of rows not equal to number of columns \n");
  }
  EXM_free_matrix(&mattmp);
}

// function by Jaehyuk Lee
/* matinv = inverse(mat) by Gaussian elimination with partial (row) pivoting —
   numerically more robust than the plain elimination above */
void EXM_invert_matrix_partial_pivoting(EXM_mat_t mat, EXM_mat_t *matinv){
  long pivot,row,row2,col;
  double temp,pivotval,fact;
  EXM_mat_t mattmp;
  EXM_init_matrix(&mattmp,mat.glm.numrow,mat.glm.numcol);
  if (mat.glm.numrow==mat.glm.numcol) {
    EXM_reinit_matrix(matinv,mat.glm.numrow,mat.glm.numcol);
    for (row=0; row<mat.glm.numrow; row++){
      for (col=0; col<mat.glm.numcol; col++){
        mattmp.cont[EXM_aim(mat.glm,row,col)]=mat.cont[EXM_aim(mat.glm,row,col)];
        matinv->cont[EXM_aim(mat.glm,row,col)]=0.0;
      }
      matinv->cont[EXM_aim(mat.glm,row,row)]=1.0;
    }
    //process starts from here
    for(row=0;row<mat.glm.numrow-1;row++){
      //partial pivoting start
      pivot=row;
      pivotval=mattmp.cont[EXM_aim(mat.glm,row,row)];
      for(row2=row+1;row2<mat.glm.numrow;row2++){
        if(fabs(pivotval)<fabs(mattmp.cont[EXM_aim(mat.glm,row2,row)])){
          pivot=row2;
          pivotval=mattmp.cont[EXM_aim(mat.glm,row2,row)];
        }
      }
      if(pivot!=row){
        for(col=0;col<mat.glm.numrow;col++){
          //partial pivoting of mattmp
          temp=mattmp.cont[EXM_aim(mat.glm,pivot,col)];
          mattmp.cont[EXM_aim(mat.glm,pivot,col)]=mattmp.cont[EXM_aim(mat.glm,row,col)];
          mattmp.cont[EXM_aim(mat.glm,row,col)]=temp;
          //partial pivoting of matinv
          temp=matinv->cont[EXM_aim(mat.glm,pivot,col)];
          matinv->cont[EXM_aim(mat.glm,pivot,col)]=matinv->cont[EXM_aim(mat.glm,row,col)];
          matinv->cont[EXM_aim(mat.glm,row,col)]=temp;
        }
      }//partial pivoting complete
      //forward substitution start
      for(row2=row+1;row2<mat.glm.numrow;row2++){
        assert(mattmp.cont[EXM_aim(mat.glm,row,row)]!=0.0e0);
        fact=-(mattmp.cont[EXM_aim(mat.glm,row2,row)])/(mattmp.cont[EXM_aim(mat.glm,row,row)]);
        for(col=0;col<mat.glm.numrow;col++){
          matinv->cont[EXM_aim(mat.glm,row2,col)]=matinv->cont[EXM_aim(mat.glm,row2,col)]+matinv->cont[EXM_aim(mat.glm,row,col)]*fact;
        }
        for(col=row;col<mat.glm.numrow;col++){
mattmp.cont[EXM_aim(mat.glm,row2,col)]=mattmp.cont[EXM_aim(mat.glm,row2,col)]+mattmp.cont[EXM_aim(mat.glm,row,col)]*fact;
        }
        mattmp.cont[EXM_aim(mat.glm,row2,row)]=0.0e0;
      }//forward substitution complete
    }
    //backward substitution start
    for(row=mat.glm.numrow-1;row>=1;row--){
      //multiply fact over matinv
      for(row2=row-1;row2>=0;row2--){
        assert(mattmp.cont[EXM_aim(mat.glm,row,row)]!=0.0e0);
        fact=-mattmp.cont[EXM_aim(mat.glm,row2,row)]/mattmp.cont[EXM_aim(mat.glm,row,row)];
        for(col=0;col<mat.glm.numrow;col++){
          matinv->cont[EXM_aim(mat.glm,row2,col)]=matinv->cont[EXM_aim(mat.glm,row2,col)]+matinv->cont[EXM_aim(mat.glm,row,col)]*fact;
        }
      }
    }
    //devide matinv by diagonal of mattmp
    for(row=0;row<mat.glm.numrow;row++){
      for(col=0;col<mat.glm.numrow;col++){
        assert(mattmp.cont[EXM_aim(mat.glm,row,row)]!=0.0e0);
        matinv->cont[EXM_aim(mat.glm,row,col)]=matinv->cont[EXM_aim(mat.glm,row,col)]/mattmp.cont[EXM_aim(mat.glm,row,row)];
      }
    }//backward substitution complete
    //EXM_display_matrix(mattmp);
  } else{
    printf("matrix cannot be inverted\n");
    printf("number of rows not equal to number of columns \n");
  }
  EXM_free_matrix(&mattmp);
}

/* public entry point for matrix inversion; currently dispatches to the
   partial-pivoting implementation */
void EXM_invert_matrix(EXM_mat_t mat, EXM_mat_t *matinv){
  //EXM_invert_matrix_gaussian_elimination(mat,matinv);
  EXM_invert_matrix_partial_pivoting(mat,matinv);
}

/* closed-form (adjugate/determinant) inverse for 1x1, 2x2 and 3x3 matrices;
   prints a message and leaves matinv untouched for any other size */
void EXM_invert_matrix_analytical(EXM_mat_t mat, EXM_mat_t *matinv){
  double den;
  if ((mat.glm.numrow==mat.glm.numcol) && (mat.glm.numrow<4)) {
    EXM_reinit_matrix(matinv,mat.glm.numrow,mat.glm.numcol);
    if (mat.glm.numrow==1){
      den=mat.cont[EXM_aim(mat.glm,0,0)];
      assert(den!=0.0);
      matinv->cont[EXM_aim(matinv->glm,0,0)]=1.0e0/den;
    }
    if (mat.glm.numrow==2){
      /* den = determinant of the 2x2 matrix */
      den=mat.cont[EXM_aim(mat.glm,0,0)]*mat.cont[EXM_aim(mat.glm,1,1)]
         -mat.cont[EXM_aim(mat.glm,0,1)]*mat.cont[EXM_aim(mat.glm,1,0)];
      assert(den!=0.0);
      matinv->cont[EXM_aim(matinv->glm,0,0)]=mat.cont[EXM_aim(mat.glm,1,1)]/den;
      matinv->cont[EXM_aim(matinv->glm,0,1)]=-mat.cont[EXM_aim(mat.glm,0,1)]/den;
      matinv->cont[EXM_aim(matinv->glm,1,0)]=-mat.cont[EXM_aim(mat.glm,1,0)]/den;
      matinv->cont[EXM_aim(matinv->glm,1,1)]=mat.cont[EXM_aim(mat.glm,0,0)]/den;
    }
    if (mat.glm.numrow==3){
      /* den = determinant of the 3x3 matrix (cofactor expansion) */
      den=mat.cont[EXM_aim(mat.glm,0,0)]*mat.cont[EXM_aim(mat.glm,1,1)]*mat.cont[EXM_aim(mat.glm,2,2)]
         -mat.cont[EXM_aim(mat.glm,0,0)]*mat.cont[EXM_aim(mat.glm,1,2)]*mat.cont[EXM_aim(mat.glm,2,1)]
         -mat.cont[EXM_aim(mat.glm,1,0)]*mat.cont[EXM_aim(mat.glm,0,1)]*mat.cont[EXM_aim(mat.glm,2,2)]
         +mat.cont[EXM_aim(mat.glm,1,0)]*mat.cont[EXM_aim(mat.glm,0,2)]*mat.cont[EXM_aim(mat.glm,2,1)]
         +mat.cont[EXM_aim(mat.glm,2,0)]*mat.cont[EXM_aim(mat.glm,0,1)]*mat.cont[EXM_aim(mat.glm,1,2)]
         -mat.cont[EXM_aim(mat.glm,2,0)]*mat.cont[EXM_aim(mat.glm,0,2)]*mat.cont[EXM_aim(mat.glm,1,1)];
      assert(den!=0.0);
      /* adjugate transposed over the determinant */
      matinv->cont[EXM_aim(mat.glm,0,0)]=(mat.cont[EXM_aim(mat.glm,1,1)]*mat.cont[EXM_aim(mat.glm,2,2)]-mat.cont[EXM_aim(mat.glm,1,2)]*mat.cont[EXM_aim(mat.glm,2,1)])/den;
      matinv->cont[EXM_aim(mat.glm,0,1)]=(mat.cont[EXM_aim(mat.glm,2,1)]*mat.cont[EXM_aim(mat.glm,0,2)]-mat.cont[EXM_aim(mat.glm,2,2)]*mat.cont[EXM_aim(mat.glm,0,1)])/den;
      matinv->cont[EXM_aim(mat.glm,0,2)]=(mat.cont[EXM_aim(mat.glm,0,1)]*mat.cont[EXM_aim(mat.glm,1,2)]-mat.cont[EXM_aim(mat.glm,0,2)]*mat.cont[EXM_aim(mat.glm,1,1)])/den;
      matinv->cont[EXM_aim(mat.glm,1,0)]=(mat.cont[EXM_aim(mat.glm,1,2)]*mat.cont[EXM_aim(mat.glm,2,0)]-mat.cont[EXM_aim(mat.glm,1,0)]*mat.cont[EXM_aim(mat.glm,2,2)])/den;
      matinv->cont[EXM_aim(mat.glm,1,1)]=(mat.cont[EXM_aim(mat.glm,2,2)]*mat.cont[EXM_aim(mat.glm,0,0)]-mat.cont[EXM_aim(mat.glm,2,0)]*mat.cont[EXM_aim(mat.glm,0,2)])/den;
      matinv->cont[EXM_aim(mat.glm,1,2)]=(mat.cont[EXM_aim(mat.glm,0,2)]*mat.cont[EXM_aim(mat.glm,1,0)]-mat.cont[EXM_aim(mat.glm,0,0)]*mat.cont[EXM_aim(mat.glm,1,2)])/den;
      matinv->cont[EXM_aim(mat.glm,2,0)]=(mat.cont[EXM_aim(mat.glm,1,0)]*mat.cont[EXM_aim(mat.glm,2,1)]-mat.cont[EXM_aim(mat.glm,1,1)]*mat.cont[EXM_aim(mat.glm,2,0)])/den;
      matinv->cont[EXM_aim(mat.glm,2,1)]=(mat.cont[EXM_aim(mat.glm,2,0)]*mat.cont[EXM_aim(mat.glm,0,1)]-mat.cont[EXM_aim(mat.glm,2,1)]*mat.cont[EXM_aim(mat.glm,0,0)])/den;
      matinv->cont[EXM_aim(mat.glm,2,2)]=(mat.cont[EXM_aim(mat.glm,0,0)]*mat.cont[EXM_aim(mat.glm,1,1)]-mat.cont[EXM_aim(mat.glm,0,1)]*mat.cont[EXM_aim(mat.glm,1,0)])/den;
    }
  } else {
    printf("mat.contrix cannot be inverted\n");
    printf("number of rows not equal to number of columns \n");
  }
}

/* degrees -> radians */
double rad(double angle){
  double tmp;
  tmp=angle*pi/180.0e0;
  return(tmp);
}

/* radians -> degrees */
double deg(double angle){
  double tmp;
  tmp=angle/pi*180.0e0;
  return(tmp);
}

/* find orthogonal vector to the three points pa, pb and pc in the xyz frame of reference */
/* cross product of (pa-pc) and (pb-pc) */
void EXM_find_orthogonal_vector(EXM_vec3D_t pa, EXM_vec3D_t pb, EXM_vec3D_t pc, EXM_vec3D_t orthovect){
  EXM_vec3D_t dp1,dp2;
  long cnt;
  for (cnt=0; cnt<3; cnt++){
    dp1[cnt]=pa[cnt]-pc[cnt];
    dp2[cnt]=pb[cnt]-pc[cnt];
  }
  orthovect[0]=dp1[1]*dp2[2]-dp1[2]*dp2[1];
  orthovect[1]=dp1[2]*dp2[0]-dp1[0]*dp2[2];
  orthovect[2]=dp1[0]*dp2[1]-dp1[1]*dp2[0];
}

/* find plane A*x+B*y+C*z=D from orthogonal vector and one point on plane p0 */
void EXM_find_plane(EXM_vec3D_t orthovect, EXM_vec3D_t p0, double *A, double *B, double *C, double *D){
  *A=orthovect[0];
  *B=orthovect[1];
  *C=orthovect[2];
  *D=(*A)*p0[0]+(*B)*p0[1]+(*C)*p0[2];
}

/* find pp, a point on the plane A-B-C-D which also lies on the line composed of vect and p0 */
void EXM_find_point_in_plane_on_vector(EXM_vec3D_t vect, EXM_vec3D_t p0, double A, double B, double C, double D, EXM_vec3D_t pp){
  double t;
  long cnt;
  /* line must not be parallel to the plane */
  assert(A*vect[0]+B*vect[1]+C*vect[2]!=0.0e0);
  t=(D-A*p0[0]-B*p0[1]-C*p0[2])/(A*vect[0]+B*vect[1]+C*vect[2]);
  for (cnt=0; cnt<3; cnt++) pp[cnt]=p0[cnt]+vect[cnt]*t;
}

/* the plane is defined by points pa,pb and pc while the point to be mirrored is pp_o.
The mirrored point is pp_m; pp_p is the point on the plane nearest to pp_o, midway
   between pp_o and pp_m */
void EXM_mirror_point_wrt_plane(EXM_vec3D_t pa, EXM_vec3D_t pb, EXM_vec3D_t pc, EXM_vec3D_t pp_o, EXM_vec3D_t pp_m){
  EXM_vec3D_t orthovect;
  double A,B,C,D;
  EXM_vec3D_t pp_p;
  long cnt;
  EXM_find_orthogonal_vector(pa,pb,pc,orthovect);
  EXM_find_plane(orthovect, pa, &A, &B, &C, &D);
  EXM_find_point_in_plane_on_vector(orthovect, pp_o, A, B, C, D, pp_p);
  /* reflect pp_o through its foot point pp_p on the plane */
  for (cnt=0; cnt<3; cnt++) pp_m[cnt]=2.0e0*pp_p[cnt]-pp_o[cnt];
}

/* print a 3D vector to stdout */
void EXM_display_vector(EXM_vec3D_t pp){
  printf("x=%E y=%E z=%E \n",pp[0],pp[1],pp[2]);
}

/* standard 3D dot product */
double EXM_dot_product(EXM_vec3D_t vec1, EXM_vec3D_t vec2){
  double sum;
  long dim;
  sum=0.0e0;
  for (dim=0; dim<3; dim++){
    sum=sum+vec1[dim]*vec2[dim];
  }
  return(sum);
}

/* prod = vec1 x vec2 */
void EXM_cross_product(EXM_vec3D_t vec1, EXM_vec3D_t vec2, EXM_vec3D_t prod){
  prod[0]=vec1[1]*vec2[2]-vec1[2]*vec2[1];
  prod[1]=vec1[2]*vec2[0]-vec1[0]*vec2[2];
  prod[2]=vec1[0]*vec2[1]-vec1[1]*vec2[0];
}

/* Euclidean norm of a 3D vector */
double EXM_vector_magnitude(EXM_vec3D_t vec){
  long dim;
  double sum;
  sum=0.0e0;
  for (dim=0; dim<3; dim++){
    sum=sum+vec[dim]*vec[dim];
  }
  sum=sqrt(sum);
  return(sum);
}

/* angle (radians) between two 3D vectors via acos of the normalized dot product */
double EXM_angle_between_vectors(EXM_vec3D_t vec1, EXM_vec3D_t vec2){
  double theta;
  theta=acos(EXM_dot_product(vec1,vec2)/EXM_vector_magnitude(vec1)/EXM_vector_magnitude(vec2));
  return(theta);
}

/* area of the quadrilateral A-B-C-D, computed as two triangles hinged at B */
double EXM_area_quadrilateral(EXM_vec3D_t A, EXM_vec3D_t B, EXM_vec3D_t C, EXM_vec3D_t D){
  EXM_vec3D_t x,y,z;
  double theta,phi,area;
  long dim;
  for (dim=0; dim<3; dim++){
    x[dim]=A[dim]-B[dim];
    y[dim]=C[dim]-B[dim];
    z[dim]=D[dim]-B[dim];
  }
  theta=EXM_angle_between_vectors(y,z);
  phi=EXM_angle_between_vectors(x,y);
  area=0.5e0*EXM_vector_magnitude(y)*(EXM_vector_magnitude(z)*fabs(sin(theta))+EXM_vector_magnitude(x)*fabs(sin(phi)));
  return(area);
}

/* numerically differentiate FUNCT (which returns dx/dt) from t1 to t2, starting from x1,
   and returning x2 */
/* ODE integration of dx/dt=FUNCT(arg,x,t) over [t1,t2] in n steps.  METHOD selects
   forward Euler, 4th-order Runge-Kutta, improved (Heun) Euler or modified (midpoint)
   Euler.  *error is cleared but never set in the visible code. */
double EXM_numerical_differentiation(double(*FUNCT)(void *, double, double), void *arg_ptr, long METHOD, long n, double x1, double t1, double t2, long *error){
  double x,dt,t;
  double x_1,x_2,x_3;
  long cnt;
  *error=0;
  dt=(t2-t1)/(double)n;
  x=x1;
  t=t1;
  if (METHOD==EXM_NUMDIFF_FORWARDEULER)
    for (cnt=0; cnt<n; cnt++) {
      x+=dt*(*FUNCT)(arg_ptr,x,t);
      t+=dt;
    }
  if (METHOD==EXM_NUMDIFF_RUNGEKUTTA)
    for (cnt=0; cnt<n; cnt++) {
      x_1=x+0.5*dt*(*FUNCT)(arg_ptr,x,t);
      x_2=x+0.5*dt*(*FUNCT)(arg_ptr,x_1,t+0.5*dt);
      x_3=x+dt*(*FUNCT)(arg_ptr,x_2,t+0.5*dt);
      x+=(x_1-x)/3.0 +(x_2-x)*2.0/3.0 +(x_3-x)/3.0 +dt/6.0*(*FUNCT)(arg_ptr,x_2,t+dt);
      t+=dt;
    }
  if (METHOD==EXM_NUMDIFF_IMPROVEDEULER)
    for (cnt=0; cnt<n; cnt++) {
      x+=0.5*dt*(*FUNCT)(arg_ptr,x,t) +0.5*dt*(*FUNCT)(arg_ptr,x+dt*(*FUNCT)(arg_ptr,x,t),t+dt);
      t+=dt;
    }
  if (METHOD==EXM_NUMDIFF_MODIFIEDEULER)
    for (cnt=0; cnt<n; cnt++) {
      x+=dt*(*FUNCT)(arg_ptr,x+0.5e0*dt*(*FUNCT)(arg_ptr,x,t),t+dt*0.5);
      t+=dt;
    }
  return(x);
}

/* integrate scalar FUNCT(arg,x) over [x1,x2] in n panels; METHOD selects
   midpoint rectangles or a second-degree polynomial composite rule */
double EXM_numerical_integration(double(*FUNCT)(void *, double), void *arg_ptr, long METHOD, long n, double x1, double x2, long *error){
  double f,f1,f2,f3,x,sum,dx;
  sum=0.0;
  *error=0;
  dx=(x2-x1)/(double)n;
  if (METHOD==EXM_NUMINTEG_RECTANGLES) {
    sum=0.0e0;
    for (x=x1+dx*0.5e0; x<x2; x+=dx){
      f=(*FUNCT)(arg_ptr,x);
      sum+=f*dx;
    }
  }
  if (METHOD==EXM_NUMINTEG_POLY2) {
    /* integrate using second degree polynomials */
    f1=(*FUNCT)(arg_ptr,x1);
    f2=(*FUNCT)(arg_ptr,x1+dx);
    f3=(*FUNCT)(arg_ptr,x1+2.0*dx);
    sum=dx*(27.0/24.0*f2+9.0/24.0*f1);
    for (x=x1+2.0*dx; x<x2-1.5*dx; x+=dx){
      f1=f2;
      f2=f3;
      f3=(*FUNCT)(arg_ptr,x+dx);
      sum+=dx*(f1/24.0+11.0/12.0*f2+f3/24.0);
    }
    f1=f2;
    f2=f3;
    f3=(*FUNCT)(arg_ptr,x2);
    sum+=dx*(27.0/24.0*f2+9.0/24.0*f3);
  }
  return(sum);
}

/* numerically integrate the function FUNCT(x) between x=x1 and x=x2, returning the value
   of the integral */
/* vector-valued counterpart of EXM_numerical_integration: FUNCT fills a 3-vector
   and each component is integrated independently into sum[] */
void EXM_numerical_integration_vector(void(*FUNCT)(void *, double, EXM_vec3D_t vector), void *arg_ptr, long METHOD, long n, double x1, double x2, long *error, EXM_vec3D_t sum){
  double x,dx;
  long dim;
  EXM_vec3D_t f,f1,f2,f3;
  for (dim=0; dim<3; dim++) sum[dim]=0.0;
  *error=0;
dx=(x2-x1)/(double)n; if (METHOD==EXM_NUMINTEG_RECTANGLES) { for (dim=0; dim<3; dim++) sum[dim]=0.0; for (x=x1+dx*0.5e0; x<x2; x+=dx){ (*FUNCT)(arg_ptr,x,f); for (dim=0; dim<3; dim++) sum[dim]+=f[dim]*dx; } } if (METHOD==EXM_NUMINTEG_POLY2) { /* integrate using second degree polynomials */ (*FUNCT)(arg_ptr,x1,f1); (*FUNCT)(arg_ptr,x1+dx,f2); (*FUNCT)(arg_ptr,x1+2.0*dx,f3); for (dim=0; dim<3; dim++) sum[dim]=dx*(27.0/24.0*f2[dim]+9.0/24.0*f1[dim]); for (x=x1+2.0*dx; x<x2-1.5*dx; x+=dx){ for (dim=0; dim<3; dim++) { f1[dim]=f2[dim]; f2[dim]=f3[dim]; } (*FUNCT)(arg_ptr,x+dx,f3); for (dim=0; dim<3; dim++) sum[dim]+=dx*(f1[dim]/24.0+11.0/12.0*f2[dim]+f3[dim]/24.0); } for (dim=0; dim<3; dim++){ f1[dim]=f2[dim]; f2[dim]=f3[dim]; } (*FUNCT)(arg_ptr,x2,f3); for (dim=0; dim<3; dim++) sum[dim]+=dx*(27.0/24.0*f2[dim]+9.0/24.0*f3[dim]); } } /* returns f at thisx given x,f at each data point */ double EXM_f_from_line(long N, double *x, double *f, double thisx){ long i; double thisf,dxi; if (N<2) EXM_fatal_error("Number of data points supplied for linear interpolation must be at least 2."); /* first find i that is such that x[i]<=thisx<=x[i+1] */ i=-1; do { i++; } while(i<(N-1) && !(x[i]<=thisx && x[i+1]>=thisx)); if (i>=N-1){ EXM_fatal_error("Couldn't find an interval for x=%E in EXM_f_from_spline.",thisx); } assert(i<N-1); dxi=x[i+1]-x[i]; thisf=f[i]+((f[i+1]-f[i])/dxi)*(thisx-x[i]); return(thisf); } /* find b at each node given f and x on all nodes from i=0 to i=N-1 note: *b must have been malloced and given enough memory space prior to calling this function*/ void EXM_find_spline(long N, double *x, double *f, double *b){ EXM_pdmaline_t *pdma; double dxim1,dxip0,dfim1,dfip0; long n; pdma=(EXM_pdmaline_t *)malloc(N*sizeof(EXM_pdmaline_t)); /* do inner nodes first */ for (n=1; n<N-1; n++){ dxim1=x[n]-x[n-1]; dxip0=x[n+1]-x[n]; dfim1=f[n]-f[n-1]; dfip0=f[n+1]-f[n]; pdma[n].val[0]=0.0; pdma[n].val[1]=dxim1; pdma[n].val[2]=2.0*(dxim1+dxip0); pdma[n].val[3]=dxip0; pdma[n].val[4]=0.0; 
assert(dxip0!=0.0); assert(dxim1!=0.0); pdma[n].val[5]=3.0*(dfip0/dxip0-dfim1/dxim1); } /* do left bdry node */ pdma[0].val[0]=0.0; pdma[0].val[1]=0.0; pdma[0].val[2]=-(x[2]-x[1]); pdma[0].val[3]=(x[1]-x[0])+(x[2]-x[1]); pdma[0].val[4]=-(x[1]-x[0]); pdma[0].val[5]=0.0; /* do right bdry node */ pdma[N-1].val[0]=-(x[N-1]-x[N-2]); pdma[N-1].val[1]=(x[N-2]-x[N-3])+(x[N-1]-x[N-2]); pdma[N-1].val[2]=-(x[N-2]-x[N-3]); pdma[N-1].val[3]=0.0; pdma[N-1].val[4]=0.0; pdma[N-1].val[5]=0.0; EXM_solve_PDMA(pdma, N); for (n=0; n<N; n++) { assert(pdma[n].val[2]!=0.0); b[n]=pdma[n].val[5]/pdma[n].val[2]; } free(pdma); } /* returns f at thisx given x,f,b at each data point */ double EXM_f_from_spline(long N, double *x, double *f, double *b, double thisx){ long i; double thisf,di,ci,bi,ai,dxi; /* first find i that is such that x[i]<=thisx<=x[i+1] */ i=-1; do { i++; } while(i<(N-1) && !(x[i]<=thisx && x[i+1]>=thisx)); if (i>=N-1){ EXM_fatal_error("Couldn't find an interval for x=%E in EXM_f_from_spline.",thisx); } assert(i<N-1); dxi=x[i+1]-x[i]; ai=(b[i+1]-b[i])/(3.0*dxi); bi=b[i]; ci=(f[i+1]-f[i])/dxi-b[i]*dxi-(b[i+1]-b[i])/3.0*dxi; di=f[i]; thisf=ai*(thisx-x[i])*(thisx-x[i])*(thisx-x[i])+bi*(thisx-x[i])*(thisx-x[i])+ci*(thisx-x[i])+di; return(thisf); } /* returns f at thisx given x,f,b at each data point */ double EXM_f_from_monotonespline(long N, double *x, double *f, double thisx){ long i; double thisf,dx,t,deltam1,deltap0,deltap1,mp0,mp1,alpha,beta,tau; mp0=mp1=0.0; //to avoid compiler warning /* first find i that is such that x[i]<=thisx<=x[i+1] */ i=-1; do { i++; } while(i<(N-1) && !(x[i]<=thisx && x[i+1]>=thisx)); if (i>=N-1){ EXM_fatal_error("Couldn't find an interval for x=%E in EXM_f_from_spline.",thisx); } assert(i<N-1); dx=x[i+1]-x[i]; t=(thisx-x[i])/dx; deltap0=(f[i+1]-f[i])/dx; if(i>=1 && i<=N-3) { deltam1=(f[i]-f[i-1])/(x[i]-x[i-1]); mp0=0.5*(deltam1+deltap0); if(deltam1*deltap0<0.0) mp0=0.0; deltap1=(f[i+2]-f[i+1])/(x[i+2]-x[i+1]); mp1=0.5*(deltap0+deltap1); 
if(deltap0*deltap1<0.0) mp1=0.0; } else if (i==0) { deltap1=(f[i+2]-f[i+1])/(x[i+2]-x[i+1]); mp0=deltap0; mp1=0.5*(deltap0+deltap1); if(deltap0*deltap1<0.0) mp1=0.0; } else if (i==N-2) { deltam1=(f[i]-f[i-1])/(x[i]-x[i-1]); mp0=0.5*(deltam1+deltap0); if(deltam1*deltap0<0.0) mp0=0.0; mp1=deltap0; } else EXM_fatal_error("Input to EXM_f_from_monotonespline() out of range."); alpha=mp0/deltap0; beta=mp1/deltap0; if(alpha*alpha+beta*beta>9.0) { tau=3.0/sqrt(alpha*alpha+beta*beta); mp0=tau*alpha*deltap0; mp1=tau*beta*deltap0; } thisf=f[i]*(2.0*t*t*t-3.0*t*t+1)+dx*mp0*(t*t*t-2.0*t*t+t)+f[i+1]*(-2.0*t*t*t+3.0*t*t)+dx*mp1*(t*t*t-t*t); return(thisf); } #define EOS 0 /* insert str1 into str2 at the position pos; make sure *str2 has enough memory allocated */ char *strins(char *str1, char *str2, long pos){ long len1,len2,i; len1=(long)strlen(str1); len2=(long)strlen(str2); for (i=len2; i>=pos; i--) (str2)[i+len1]=(str2)[i]; for (i=0; i<len1; i++) (str2)[pos+i]=str1[i]; (str2)[len2+len1]=EOS; return str2; } /* add line breaks without breaking words with width the maximum number of characters per line */ char *strwrp(char *str, int width){ long cnt,cntbreak; bool CONTINUE; CONTINUE=TRUE; cntbreak=0; cnt=0; do { cnt++; if (str[cnt]=='\n') cntbreak=cnt; if (cnt-cntbreak>width){ cntbreak=cnt; do { cntbreak--; } while(str[cntbreak]!=' ' && str[cntbreak]!='-' && cntbreak>0); if (cntbreak>0){ if (str[cntbreak]=='-') { strins("\n",str,cntbreak+1); cntbreak++; } str[cntbreak]='\n'; cnt+=1; } else { // problem breaking line.. 
CONTINUE=FALSE; } } } while (CONTINUE && cnt<strlen(str)-1); return str; } char *strrep(char *str, char *orig, char *repl) { char *p; char *strtmp; strtmp=(char *)malloc(sizeof(char)*(strlen(str)+strlen(repl)+2)); if(!(p = strstr(str, orig))) return str; strncpy(strtmp, str, p-str); strtmp[p-str] = EOS; sprintf(strtmp+(p-str), "%s%s", repl, p+strlen(orig)); strcpy(str,strtmp); free(strtmp); return str; } /* add indent spaces to second and subsequent lines; make sure str has enough memory allocated */ char *strind(char *str, int indent){ long cnt,cnt2; static char whitespace[2]; strcpy(whitespace," "); if (indent<0){ /* hang indent */ cnt=0; do { cnt++; if (str[cnt]=='\n' && str[cnt+1]!=EOS) { for (cnt2=0; cnt2<(-indent); cnt2++) strins(whitespace,str,cnt+1); cnt-=indent; } } while (str[cnt]!=EOS); } else { /* indent */ for (cnt=0; cnt<indent; cnt++) strins(whitespace,str,0); } return str; } /* add line breaks without breaking words with width the maximum number of characters per line and indent the number of indented characters (either negative or positive) */ char *strwrpind(char *str, int width, int indent){ long cnt,cnt2,cntbreak; bool CONTINUE; static char whitespace[2]; strcpy(whitespace," "); if (indent>0){ for (cnt=0; cnt<indent; cnt++) strins(whitespace,str,0); } CONTINUE=TRUE; cntbreak=0; cnt=0; do { cnt++; if (str[cnt]=='\n') cntbreak=cnt; if (cnt-cntbreak>width-1){ cntbreak=cnt; do { cntbreak--; } while(str[cntbreak]!=' ' && str[cntbreak]!='-' && cntbreak>0); if (cntbreak>0){ if (str[cntbreak]=='-') { strins("\n",str,cntbreak+1); cntbreak++; } str[cntbreak]='\n'; cnt++; if (indent<0){ // do the hang indent here for (cnt2=0; cnt2<-indent; cnt2++){ strins(whitespace,str,cntbreak+1); } } } else { // problem breaking line.. 
CONTINUE=FALSE; } } } while (CONTINUE && cnt<strlen(str)-1); return str; } void find_terminal_window_size(int *width, int *height){ struct winsize w; ioctl(STDOUT_FILENO, TIOCGWINSZ, &w); *width=w.ws_col; *height=w.ws_row; } double avg_harmonic(double arg1, double arg2){ double ret; if (arg1==0.0) EXM_fatal_error("Problem: arg1 is zero in avg_harmonic()."); if (arg2==0.0) EXM_fatal_error("Problem: arg2 is zero in avg_harmonic()."); if (arg1<0.0) EXM_fatal_error("Problem: arg1 is negative in avg_harmonic()."); if (arg2<0.0) EXM_fatal_error("Problem: arg2 is negative in avg_harmonic()."); ret=2.0/(1.0/arg1+1.0/arg2); return(ret); } static int argrank(int argc, char **argv, char *arg){ int cnt,tmp; bool FOUND; tmp=0; FOUND=FALSE; for (cnt=1; cnt<argc; cnt++){ if (strcmp(argv[cnt],arg) == 0) { tmp=cnt; if (!FOUND) FOUND=TRUE; else EXM_fatal_error("The flag %s can not be called twice.",arg); } } return(tmp); } int process_flag_string(int argc, char **argv, char *flag, char **arg){ int RET; int flagrank; flagrank=argrank(argc,argv,flag); if (flagrank!=0) { if (argc<=flagrank+1) EXM_fatal_error("Missing argument after flag %s.",flag); *arg=(char *)realloc(*arg,(2+(long)strlen(argv[flagrank+1]))*sizeof(char)); strcpy(*arg,argv[flagrank+1]); strcpy(argv[flagrank+0],"\0"); strcpy(argv[flagrank+1],"\0"); RET=2; } else { RET=0; } return(RET); } int process_flag_int(int argc, char **argv, char *flag, int *arg){ int RET; int flagrank; int eos=EOS; flagrank=argrank(argc,argv,flag); if (flagrank!=0) { if (argc<=flagrank+1) EXM_fatal_error("Missing argument after flag %s.",flag); if (sscanf(argv[flagrank+1], "%d%n", arg,&eos)!=1 || argv[flagrank+1][eos]!=EOS) { EXM_fatal_error("Expecting integer argument after %s flag.",flag); } strcpy(argv[flagrank+0],"\0"); strcpy(argv[flagrank+1],"\0"); RET=2; } else { RET=0; } return(RET); } int process_flag_double(int argc, char **argv, char *flag, double *arg){ int RET; int flagrank; int eos=EOS; flagrank=argrank(argc,argv,flag); if 
(flagrank!=0) { if (argc<=flagrank+1) EXM_fatal_error("Missing argument after flag %s.",flag); if (sscanf(argv[flagrank+1], "%lf%n", arg,&eos)!=1 || argv[flagrank+1][eos]!=EOS) { EXM_fatal_error("Expecting integer argument after %s flag.",flag); } strcpy(argv[flagrank+0],"\0"); strcpy(argv[flagrank+1],"\0"); RET=2; } else { RET=0; } return(RET); } int process_flag_int_multiple(int argc, char **argv, char *flag, int **arg){ int RET,cnt; int flagrank; int eos=EOS; bool CONTINUE; flagrank=argrank(argc,argv,flag); if (flagrank!=0) { if (argc<=flagrank+1) EXM_fatal_error("Missing argument after flag %s.",flag); cnt=0; CONTINUE=TRUE; do { cnt++; *arg=(int *)realloc(*arg,(cnt+2)*sizeof(int)); if ((flagrank+cnt)>=argc || sscanf(argv[flagrank+cnt], "%d%n", &((*arg)[cnt-1]),&eos)!=1 || argv[flagrank+cnt][eos]!=EOS) CONTINUE=FALSE; else CONTINUE=TRUE; } while(CONTINUE); RET=cnt; for (cnt=0; cnt<RET; cnt++) strcpy(argv[flagrank+cnt],"\0"); } else { RET=0; } return(RET); } int process_flag_double_multiple(int argc, char **argv, char *flag, double **arg){ int RET,cnt; int flagrank; bool CONTINUE; flagrank=argrank(argc,argv,flag); if (flagrank!=0) { if (argc<=flagrank+1) EXM_fatal_error("Missing argument after flag %s.",flag); cnt=0; CONTINUE=TRUE; do { cnt++; *arg=(double *)realloc(*arg,(cnt+2)*sizeof(double)); if ((flagrank+cnt)>=argc || sscanf(argv[flagrank+cnt], "%lg", &((*arg)[cnt-1]))!=1) CONTINUE=FALSE; else CONTINUE=TRUE; } while(CONTINUE); RET=cnt; for (cnt=0; cnt<RET; cnt++) strcpy(argv[flagrank+cnt],"\0"); } else { RET=0; } return(RET); } int process_flag(int argc, char **argv, char *flag){ int RET; int flagrank; flagrank=argrank(argc,argv,flag); if (flagrank!=0) { strcpy(argv[flagrank],"\0"); RET=1; } else { RET=0; } return(RET); } int find_remaining_options(int argc, char **argv, char **options){ long cnt; int RET; RET=0; *options=(char *)realloc(*options,sizeof(char)); (*options)[0]=EOS; /* start cnt at 1 to exclude the executable */ for (cnt=1; cnt<argc; cnt++){ if 
((argv[cnt])[0]!='\0'){ RET++; *options=(char *)realloc(*options,(2+strlen(*options)+strlen(argv[cnt]))*sizeof(char)); strcat(*options,argv[cnt]); strcat(*options," "); } } return(RET); } double min3(double val1, double val2, double val3){ return(min(val1,min(val2,val3))); } double max3(double val1, double val2, double val3){ return(max(val1,max(val2,val3))); } double notzero(double val, double sub){ if (val==0.0) val=sub; return(val); } double minmod(double x, double y){ double tmp; tmp=sign(x)*max(0.0e0,min(fabs(x),sign(x)*y)); return(tmp); } double minmod_old(double val1, double val2){ double ret; if (fabs(val1)>fabs(val2)) ret=val2; else ret=val1; if (val1*val2<0.0) ret=0.0; return(ret); } double minmod3(double x, double y, double z){ double tmp; tmp=sign(x)*max(0.0e0,min(sign(x)*z,min(fabs(x),sign(x)*y))); return(tmp); } double maxmag(double x, double y){ double ret; if (fabs(x)>fabs(y)) ret=x; else ret=y; return(ret); } #ifdef linux #include <execinfo.h> /* perform a backtrace on linux systems; * make sure to compile and link your code with debugging symbols (-g flag) * make sure to link your code with the -rdynamic flag * leave this function commented because it is not ANSI C compliant */ void output_backtrace(void) { void* callstack[128]; int i, frames = backtrace(callstack, 128); char** strs = backtrace_symbols(callstack, frames); for (i = 0; i < frames; ++i) { printf("%s\n", strs[i]); } free(strs); } #else void output_backtrace(void) { printf("No backtrace available. Use a linux OS to find backtrace.\n"); } #endif
GB_binop__gt_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__gt_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__gt_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__gt_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__gt_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__gt_fp64)
// A*D function (colscale):         GB (_AxD__gt_fp64)
// D*A function (rowscale):         GB (_DxB__gt_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__gt_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__gt_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__gt_fp64)
// C=scalar+B                       GB (_bind1st__gt_fp64)
// C=scalar+B'                      GB (_bind1st_tran__gt_fp64)
// C=A+scalar                       GB (_bind2nd__gt_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__gt_fp64)

// C type:   bool
// A type:   double
// A pattern? 0
// B type:   double
// B pattern? 0

// BinaryOp: cij = (aij > bij)

// The macros below configure the generic templates (GB_add_template.c,
// GB_emult_*_template.c, etc.) for the GT (greater-than) operator on
// double inputs producing a bool result.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GT || GxB_NO_FP64 || GxB_NO_GT_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// GT is not one of these, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accum with the GT operator is not supported; this body is compiled out
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__gt_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accum with the GT operator is not supported; this body is compiled out
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__gt_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true; they fill in
    // entries present in only one of A or B
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__gt_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // GT is handled this way (GB_BINOP_FLIP is 0 above).
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__gt_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__gt_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB skips entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__gt_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__gt_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__gt_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
adam_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <math.h>  // for sqrt in CPU and CUDA
#include <Eigen/Dense>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/threadpool.h"
#include "paddle/fluid/operators/jit/kernels.h"
#include "paddle/fluid/operators/math/algorithm.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace operators {

namespace scatter = paddle::operators::math::scatter;

// Reads the first float element of a 1-element attribute tensor, copying it
// to the CPU first when it lives on a GPU or XPU device.
static inline float GetAttrFromTensor(const framework::Tensor* tensor) {
  const float* tensor_data = tensor->data<float>();
  framework::Tensor cpu_tensor;
  if (platform::is_gpu_place(tensor->place())) {
    paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(),
                                      &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  if (platform::is_xpu_place(tensor->place())) {
    paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(),
                                      &cpu_tensor);
    tensor_data = cpu_tensor.data<float>();
  }
  return tensor_data[0];
}

// Operator declaration for Adam; shape inference and kernel selection are
// defined elsewhere (only declared here).
class AdamOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
  framework::OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const framework::Tensor& tensor,
      const framework::OpKernelType& expected_kernel_type) const override;
};

// Tag types selecting the element-wise (GPUAdam) vs. vectorized Eigen
// (CPUAdam) implementation of the functors below.
struct GPUAdam;
struct CPUAdam;

template <typename T, typename Flavour>
class AdamFunctor;

// Dense Adam update, one element per operator() call (used with for_range).
template <typename T>
class AdamFunctor<T, GPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;   // beta1^t (1-element)
  const T* beta2_pow_;   // beta2^t (1-element)
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;          // learning rate (1-element)
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  inline HOSTDEVICE void operator()(size_t i) const {
    // Merge all memory access together.
    T g = grad_[i];
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation: fold the bias-correction terms into the learning rate
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }
};

// Dense Adam update over the whole parameter at once via Eigen array maps.
template <typename T>
class AdamFunctor<T, CPUAdam> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

 public:
  AdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
              const T* beta2_pow, const T* mom1, T* mom1_out, const T* mom2,
              T* mom2_out, const T* lr, const T* grad, const T* param,
              T* param_out)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out) {}

  void operator()(size_t numel) const {
    // Map the raw buffers as 1 x numel Eigen arrays (no copies).
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> g{
        grad_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom1{
        moment1_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> mom2{
        moment2_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>> param{
        param_, static_cast<Eigen::Index>(numel)};

    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> param_out{
        param_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment1_out{
        moment1_out_, static_cast<Eigen::Index>(numel)};
    Eigen::Map<Eigen::Array<T, 1, Eigen::Dynamic>> moment2_out{
        moment2_out_, static_cast<Eigen::Index>(numel)};

    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;

    // Calculation: same update as the GPUAdam flavour, vectorized
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    moment1_out = beta1_ * mom1 + (1 - beta1_) * g;
    moment2_out = beta2_ * mom2 + (1 - beta2_) * g * g;
    param_out = param -
                lr * (moment1_out /
                      (moment2_out.sqrt() + epsilon_ * sqrt(1 - beta2_pow)));
  }
};

template <typename T, typename Flavour, typename MT = T>
class SparseAdamFunctor;

// Sparse (SelectedRows) Adam update for GPU, with optional master weights
// (MT is the math/master type; T may be a lower-precision storage type).
template <typename T, typename MT>
class SparseAdamFunctor<T, GPUAdam, MT> {
 private:
  MT beta1_;
  MT beta2_;
  MT epsilon_;

  const MT* beta1_pow_;
  const MT* beta2_pow_;
  const MT* moment1_;
  MT* moment1_out_;
  const MT* moment2_;
  MT* moment2_out_;
  const MT* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;
  const MT* master_param_;   // may be null: then param_ is read/cast instead
  MT* master_param_out_;     // may be null: then no master copy is written
  const int64_t* rows_;      // sorted row ids present in the sparse gradient
  int64_t row_numel_;
  int64_t row_count_;
  bool lazy_mode_;           // if true, rows absent from grad are skipped

 public:
  SparseAdamFunctor(MT beta1, MT beta2, MT epsilon, const MT* beta1_pow,
                    const MT* beta2_pow, const MT* mom1, MT* mom1_out,
                    const MT* mom2, MT* mom2_out, const MT* lr, const T* grad,
                    const T* param, T* param_out, const MT* master_param,
                    MT* master_param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        master_param_(master_param),
        master_param_out_(master_param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count),
        lazy_mode_(lazy_mode) {}

  inline HOSTDEVICE void adam_update(size_t i, MT g) const {
    // The following code is the same as dense
    MT mom1 = moment1_[i];
    MT mom2 = moment2_[i];
    MT lr = *lr_;
    MT beta1_pow = *beta1_pow_;
    MT beta2_pow = *beta2_pow_;
    MT p = master_param_ ? master_param_[i] : static_cast<MT>(param_[i]);

    // Calculation
    lr *= sqrt(static_cast<MT>(1.0) - beta2_pow) /
          (static_cast<MT>(1.0) - beta1_pow);

    mom1 = beta1_ * mom1 + (static_cast<MT>(1.0) - beta1_) * g;
    mom2 = beta2_ * mom2 + (static_cast<MT>(1.0) - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) +
                       epsilon_ * sqrt(static_cast<MT>(1.0) - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = static_cast<T>(p);
    if (master_param_out_) {
      master_param_out_[i] = p;
    }
  }

  inline HOSTDEVICE void operator()(size_t i) const {
    // Locate the sparse-gradient row (if any) covering element i.
    auto row_idx =
        math::BinarySearch<int64_t>(rows_, row_count_, i / row_numel_);
    if (lazy_mode_ && row_idx < 0) {
      return;
    } else {
      // Rows not present in the gradient are updated with g = 0
      // (moments still decay) unless lazy_mode_ skipped them above.
      MT g = row_idx >= 0
                 ? static_cast<MT>(grad_[row_idx * row_numel_ + i % row_numel_])
                 : static_cast<MT>(0);
      adam_update(i, g);
    }
  }
};

// Sparse (SelectedRows) Adam update for CPU, single precision type T.
template <typename T>
class SparseAdamFunctor<T, CPUAdam, T> {
 private:
  T beta1_;
  T beta2_;
  T epsilon_;

  const T* beta1_pow_;
  const T* beta2_pow_;
  const T* moment1_;
  T* moment1_out_;
  const T* moment2_;
  T* moment2_out_;
  const T* lr_;
  const T* grad_;
  const T* param_;
  T* param_out_;

  const int64_t* rows_;
  int64_t row_numel_;
  int64_t row_count_;

 public:
  SparseAdamFunctor(T beta1, T beta2, T epsilon, const T* beta1_pow,
                    const T* beta2_pow, const T* mom1, T* mom1_out,
                    const T* mom2, T* mom2_out, const T* lr, const T* grad,
                    const T* param, T* param_out, const int64_t* rows,
                    int64_t row_numel, int64_t row_count, bool lazy_mode)
      : beta1_(beta1),
        beta2_(beta2),
        epsilon_(epsilon),
        beta1_pow_(beta1_pow),
        beta2_pow_(beta2_pow),
        moment1_(mom1),
        moment1_out_(mom1_out),
        moment2_(mom2),
        moment2_out_(mom2_out),
        lr_(lr),
        grad_(grad),
        param_(param),
        param_out_(param_out),
        rows_(rows),
        row_numel_(row_numel),
        row_count_(row_count) {}
        // NOTE(review): lazy_mode is accepted but not stored here; the CPU
        // caller decides lazy vs. full traversal outside this functor.

  inline HOSTDEVICE void adam_update(size_t i, T g) const {
    // The following code is the same as dense
    T mom1 = moment1_[i];
    T mom2 = moment2_[i];
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    T p = param_[i];

    // Calculation
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);

    mom1 = beta1_ * mom1 + (1 - beta1_) * g;
    mom2 = beta2_ * mom2 + (1 - beta2_) * g * g;
    p -= lr * (mom1 / (sqrt(mom2) + epsilon_ * sqrt(1 - beta2_pow)));

    // Write back to global memory
    moment1_out_[i] = mom1;
    moment2_out_[i] = mom2;
    param_out_[i] = p;
  }

  inline void operator()(size_t numel) const {
    // lr could be reuse
    T lr = *lr_;
    T beta1_pow = *beta1_pow_;
    T beta2_pow = *beta2_pow_;
    lr *= sqrt(1 - beta2_pow) / (1 - beta1_pow);
    // Walk all parameter rows; j tracks the next sparse-gradient row.
    // Assumes rows_ is sorted ascending (callers merge/sort beforehand).
    int64_t row_count = static_cast<int64_t>(numel / row_numel_);
    for (int64_t i = 0, j = 0; i != row_count; ++i) {
      if (i == *(rows_ + j)) {
        // Row present in the gradient: full Adam update.
        for (int64_t k = 0; k != row_numel_; ++k) {
          T g = grad_[j * row_numel_ + k];
          adam_update(i * row_numel_ + k, g);
        }
        ++j;
      } else {
        // Row absent: g = 0, so moments only decay (note the epsilon term
        // here is not scaled by sqrt(1 - beta2_pow), unlike adam_update).
        for (int64_t k = 0; k != row_numel_; ++k) {
          T mom1 = moment1_[i * row_numel_ + k];
          T mom2 = moment2_[i * row_numel_ + k];
          T p = param_[i * row_numel_ + k];

          mom1 = beta1_ * mom1;
          mom2 = beta2_ * mom2;

          p -= lr * (mom1 / (sqrt(mom2) + epsilon_));
          // Write back to global memory
          moment1_out_[i * row_numel_ + k] = mom1;
          moment2_out_[i * row_numel_ + k] = mom2;
          param_out_[i * row_numel_ + k] = p;
        }
      }
    }
  }
};

// CPU kernel: dispatches to a chunked JIT kernel for dense gradients and to
// SparseAdamFunctor (serial, lazy, or multithreaded) for SelectedRows.
template <typename DeviceContext, typename T>
class AdamOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
                      platform::errors::InvalidArgument(
                          "The Var(%s)'s type should be LoDTensor, "
                          "but the received is %s",
                          ctx.InputNames("Param").front(),
                          framework::ToTypeName(param_var->Type())));

    using paddle::framework::LoDTensor;

    int64_t min_row_size_to_use_multithread =
        ctx.Attr<int64_t>("min_row_size_to_use_multithread");
    bool lazy_mode = ctx.Attr<bool>("lazy_mode");
    bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow");
    VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;

    auto* param = ctx.Input<LoDTensor>("Param");
    auto* grad_var = ctx.InputVar("Grad");
    auto* mom1 = ctx.Input<LoDTensor>("Moment1");
    auto* mom2 = ctx.Input<LoDTensor>("Moment2");
    auto* lr = ctx.Input<LoDTensor>("LearningRate");
    auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow");
    auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow");

    auto* param_out = ctx.Output<LoDTensor>("ParamOut");
    auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out");
    auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out");
    auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut");
    auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut");

    bool skip_update = false;
    if (ctx.HasInput("SkipUpdate")) {
      auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate");
      PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(SkipUpdate) size must be 1, but get %d",
                            skip_update_tensor->numel()));
      std::vector<bool> skip_update_vec;
      paddle::framework::TensorToVector(*skip_update_tensor,
                                        ctx.device_context(), &skip_update_vec);
      skip_update = skip_update_vec[0];
    }
    // skip_update=true, just copy input to output, and TensorCopy will call
    // mutable_data
    if (skip_update) {
      VLOG(4) << "Adam skip update";
      framework::TensorCopy(
          *param, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), param_out);
      framework::TensorCopy(
          *mom1, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), mom1_out);
      framework::TensorCopy(
          *mom2, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(), mom2_out);
      framework::TensorCopy(
          *beta1_pow, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(),
          beta1_pow_out);
      framework::TensorCopy(
          *beta2_pow, ctx.GetPlace(),
          ctx.template device_context<platform::DeviceContext>(),
          beta2_pow_out);
      return;
    }

    // beta1/beta2/epsilon may come from attributes or 1-element tensors;
    // the tensor inputs take precedence when present.
    T beta1 = static_cast<T>(ctx.Attr<float>("beta1"));
    if (ctx.HasInput("Beta1Tensor")) {
      auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor");
      PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta1Tensor) size must be 1, but get %d",
                            beta1_tensor->numel()));
      beta1 = static_cast<T>(GetAttrFromTensor(beta1_tensor));
    }
    T beta2 = static_cast<T>(ctx.Attr<float>("beta2"));
    if (ctx.HasInput("Beta2Tensor")) {
      auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor");
      PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(Beta2Tensor) size must be 1, but get %d",
                            beta2_tensor->numel()));
      beta2 = static_cast<T>(GetAttrFromTensor(beta2_tensor));
    }
    T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
    if (ctx.HasInput("EpsilonTensor")) {
      auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor");
      PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1,
                        platform::errors::InvalidArgument(
                            "Input(EpsilonTensor) size must be 1, but get %d",
                            epsilon_tensor->numel()));
      epsilon = static_cast<T>(GetAttrFromTensor(epsilon_tensor));
    }

    VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel()
            << "beta2_pow.numel() : " << beta2_pow->numel();
    VLOG(3) << "param.numel(): " << param->numel();

    PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta1 pow output size should be 1, but received "
                          "value is:%d.",
                          beta1_pow_out->numel()));

    PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1,
                      platform::errors::InvalidArgument(
                          "beta2 pow output size should be 1, but received "
                          "value is:%d.",
                          beta2_pow_out->numel()));

    if (grad_var->IsType<framework::LoDTensor>()) {
      // Dense gradient: use the chunked JIT Adam kernel.
      // Snapshot beta powers BEFORE they are advanced below, since the
      // update uses the pre-step values.
      T beta1_p = beta1_pow->data<T>()[0];
      T beta2_p = beta2_pow->data<T>()[0];

      if (!use_global_beta_pow) {
        beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta1 * beta1_pow->data<T>()[0];
        beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta2 * beta2_pow->data<T>()[0];
      }

      auto* grad = ctx.Input<LoDTensor>("Grad");

      T* param_out_ptr = param_out->mutable_data<T>(ctx.GetPlace());
      T* mom1_out_ptr = mom1_out->mutable_data<T>(ctx.GetPlace());
      T* mom2_out_ptr = mom2_out->mutable_data<T>(ctx.GetPlace());

      // Pre-fold bias correction into lr and epsilon (matches the
      // per-element formula used by the functors above).
      T learning_rate = lr->data<T>()[0] * (sqrt(1 - beta2_p) / (1 - beta1_p));
      T eps = epsilon * sqrt(1 - beta2_p);

      jit::adam_attr_t attr(beta1, beta2);
      int64_t numel = param->numel();

      const T* param_ptr = param->data<T>();
      const T* mom1_ptr = mom1->data<T>();
      const T* mom2_ptr = mom2->data<T>();
      const T* grad_ptr = grad->data<T>();

      auto adam =
          jit::KernelFuncs<jit::AdamTuple<T>, platform::CPUPlace>::Cache().At(
              attr);

      static constexpr int64_t chunk_size = 512;

#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
      for (int64_t i = 0; i < numel / chunk_size; ++i) {
        const int64_t offset = i * chunk_size;
        adam(beta1, beta2, -learning_rate, eps, chunk_size, grad_ptr + offset,
             mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset,
             mom1_out_ptr + offset, mom2_out_ptr + offset,
             param_out_ptr + offset);
      }

      // Handle the trailing partial chunk, if any.
      if (numel % chunk_size != 0) {
        const int64_t offset = (numel / chunk_size) * chunk_size;
        const int64_t tail_numel = numel % chunk_size;
        adam(beta1, beta2, -learning_rate, eps, tail_numel, grad_ptr + offset,
             mom1_ptr + offset, mom2_ptr + offset, param_ptr + offset,
             mom1_out_ptr + offset, mom2_out_ptr + offset,
             param_out_ptr + offset);
      }
    } else if (grad_var->IsType<pten::SelectedRows>()) {
      // Sparse gradient (SelectedRows).
      auto* grad = ctx.Input<pten::SelectedRows>("Grad");
      if (grad->rows().size() == 0) {
        VLOG(3) << "grad row size is 0!!";
        return;
      }

      std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end());
      bool is_strict_sorted = true;
      for (size_t i = 1; i < cpu_rows.size(); ++i) {
        if (cpu_rows[i - 1] >= cpu_rows[i]) {
          is_strict_sorted = false;
          break;
        }
      }

      pten::SelectedRows tmp_grad_merge;
      const pten::SelectedRows* grad_merge_ptr;
      if (is_strict_sorted) {
        grad_merge_ptr = grad;
      } else {
        // merge duplicated rows if any.
        // The rows of grad_merge have been sorted inside MergeAdd functor
        scatter::MergeAdd<DeviceContext, T> merge_func;
        merge_func(ctx.template device_context<DeviceContext>(), *grad,
                   &tmp_grad_merge, true);
        grad_merge_ptr = &tmp_grad_merge;
      }

      auto& grad_merge = *grad_merge_ptr;
      auto& grad_tensor = grad_merge.value();
      const T* grad_data = grad_tensor.template data<T>();
      const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace());
      auto row_numel = grad_tensor.numel() / grad_merge.rows().size();

      SparseAdamFunctor<T, CPUAdam> functor(
          beta1, beta2, epsilon, beta1_pow->data<T>(), beta2_pow->data<T>(),
          mom1->data<T>(), mom1_out->mutable_data<T>(ctx.GetPlace()),
          mom2->data<T>(), mom2_out->mutable_data<T>(ctx.GetPlace()),
          lr->data<T>(), grad_data, param->data<T>(),
          param_out->mutable_data<T>(ctx.GetPlace()), rows, row_numel,
          grad_merge.rows().size(), lazy_mode);
      // update beta1 and beta2
      if (!use_global_beta_pow) {
        beta1_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta1 * beta1_pow->data<T>()[0];
        beta2_pow_out->mutable_data<T>(ctx.GetPlace())[0] =
            beta2 * beta2_pow->data<T>()[0];
      }
      if (lazy_mode) {
        // Lazy mode: update only the elements belonging to rows present in
        // the sparse gradient.
        VLOG(3) << "run cpu lazy mode";
        size_t row_count = grad_merge.rows().size();
        std::vector<int64_t> cpu_rows(grad_merge.rows());
        for (size_t row_index = 0; row_index < row_count; ++row_index) {
          for (size_t offset = 0; offset < row_numel; ++offset) {
            size_t i = cpu_rows[row_index] * row_numel + offset;
            functor.adam_update(i, grad_data[row_index * row_numel + offset]);
          }
        }
      }
#ifndef _WIN32
      else if (FLAGS_inner_op_parallelism > 1 &&  // NOLINT
               min_row_size_to_use_multithread > 0 &&
               param->dims()[0] > min_row_size_to_use_multithread) {
        // Multithreaded full update: shard parameter rows across a thread
        // pool; rows without a gradient entry get the g = 0 decay update.
        VLOG(3) << "use multi thread, inner_op_parallelism="
                << FLAGS_inner_op_parallelism
                << " min_row_size_to_use_multithread="
                << min_row_size_to_use_multithread;
        if (FLAGS_inner_op_parallelism > 10) {
          VLOG(1) << "FLAGS_inner_op_parallelism " << FLAGS_inner_op_parallelism
                  << " is two large!";
        }
        auto& grad_rows = grad_merge.rows();
        std::unordered_map<size_t, int> row_id_to_grad_row_offset;
        size_t param_row_count = param->numel() / row_numel;
        if (param_row_count < 1000) {
          VLOG(1) << "param_row_count should be larger then 1000 to use "
                     "multi thread, currently "
                  << param_row_count;
        }
        for (size_t i = 0; i < grad_rows.size(); ++i) {
          row_id_to_grad_row_offset[grad_rows[i]] = i;
        }
        std::vector<std::future<void>> fs;
        int64_t line_in_each_thread =
            param_row_count / FLAGS_inner_op_parallelism + 1;
        for (int i = 0; i < FLAGS_inner_op_parallelism; ++i) {
          int64_t start = i * line_in_each_thread;
          int64_t end = (i + 1) * line_in_each_thread;
          if (start >= static_cast<int64_t>(param_row_count)) {
            break;
          }
          if (end > static_cast<int64_t>(param_row_count)) {
            end = static_cast<int64_t>(param_row_count);
          }
          fs.push_back(framework::Async([&functor, &row_id_to_grad_row_offset,
                                         &grad_data, row_numel, start, end]() {
            for (int64_t row_id = start; row_id < end; ++row_id) {
              auto iter = row_id_to_grad_row_offset.find(row_id);
              if (iter != row_id_to_grad_row_offset.end()) {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(
                      row_id * row_numel + row_offset,
                      grad_data[iter->second * row_numel + row_offset]);
                }
              } else {
                for (size_t row_offset = 0U; row_offset < row_numel;
                     ++row_offset) {
                  functor.adam_update(row_id * row_numel + row_offset, 0);
                }
              }
            }
          }));
        }
        for (size_t i = 0; i < fs.size(); ++i) fs[i].wait();
      }
#endif          // !_WIN32
      else {    // NOLINT
        // Serial full update over every row.
        functor(param->numel());
      }
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Variable type not supported by adam_op"));
    }
  }
};

}  // namespace operators
}  // namespace paddle
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/property.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/feature.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l F e a t u r e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelFeatures() returns features for each channel in the image in % each of four directions (horizontal, vertical, left and right diagonals) % for the specified distance. 
The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference
%  variance, difference entropy, information measures of correlation 1,
%  information measures of correlation 2, and maximum correlation coefficient.
%  You can access the red channel contrast, for example, like this:
%
%      channel_features=GetImageChannelFeatures(image,1,exception);
%      contrast=channel_features[RedChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageChannelFeatures method is:
%
%      ChannelFeatures *GetImageChannelFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance (in pixels) between the members of each pixel
%      pair used to build the co-occurrence matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  MagickAbsoluteValue() returns the absolute value of its ssize_t argument.
  It is used below to index the difference histogram, density_xy[|y-x|].
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  if (x < 0)
    return(-x);
  return(x);
}

MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image,
  const size_t distance,ExceptionInfo *exception)
{
  /*
    One statistics value per channel, in each of the four adjacency
    directions.
  */
  typedef struct _ChannelStatistics
  {
    DoublePixelPacket
      direction[4];  /* horizontal, vertical, left and right diagonals */
  } ChannelStatistics;

  CacheView
    *image_view;

  ChannelFeatures
    *channel_features;

  /*
    Co-occurrence matrix and the marginal (density_x, density_y) and
    sum/difference (density_xy) distributions derived from it.
  */
  ChannelStatistics
    **cooccurrence,
    correlation,
    *density_x,
    *density_xy,
    *density_y,
    entropy_x,
    entropy_xy,
    entropy_xy1,
    entropy_xy2,
    entropy_y,
    mean,
    **Q,
    *sum,
    sum_squares,
    variance;

  LongPixelPacket
    gray,
    *grays;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    length;

  ssize_t
    y,
    z;

  unsigned int
    number_grays;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The image must be at least (distance+1) pixels in each dimension so that
    a pixel pair at the requested distance exists.
  */
  if ((image->columns < (distance+1)) || (image->rows < (distance+1)))
    return((ChannelFeatures *) NULL);
  /*
    Allocate one ChannelFeatures slot per channel (AllChannels plus one).
  */
  length=AllChannels+1UL;
channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_features,0,length* sizeof(*channel_features)); /* Form grays. */ grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (LongPixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].opacity=(~0U); grays[i].index=(~0U); } status=MagickTrue; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(p->red)].red= ScaleQuantumToMap(p->red); grays[ScaleQuantumToMap(p->green)].green= ScaleQuantumToMap(p->green); grays[ScaleQuantumToMap(p->blue)].blue= ScaleQuantumToMap(p->blue); if (image->matte != MagickFalse) grays[ScaleQuantumToMap(p->opacity)].opacity= ScaleQuantumToMap(p->opacity); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(indexes[x])].index= ScaleQuantumToMap(indexes[x]); p++; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(LongPixelPacket *) RelinquishMagickMemory(grays); 
channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) ResetMagickMemory(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->matte != MagickFalse) if (grays[i].opacity != ~0U) grays[gray.opacity++].opacity=grays[i].opacity; if (image->colorspace == CMYKColorspace) if (grays[i].index != ~0U) grays[gray.index++].index=grays[i].index; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->matte != MagickFalse) if (gray.opacity > number_grays) number_grays=gray.opacity; if (image->colorspace == CMYKColorspace) if (gray.index > number_grays) number_grays=gray.index; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) 
NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) ResetMagickMemory(&correlation,0,sizeof(correlation)); (void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) ResetMagickMemory(&mean,0,sizeof(mean)); (void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); (void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x)); (void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy)); (void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1)); (void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2)); (void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y)); (void) ResetMagickMemory(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); 
Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) ResetMagickMemory(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. 
*/ status=MagickTrue; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+ 2*distance,distance+1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); p+=distance; indexes+=distance; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(p->red)) u++; while (grays[v].red != ScaleQuantumToMap((p+offset)->red)) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(p->green)) u++; while (grays[v].green != ScaleQuantumToMap((p+offset)->green)) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(p->blue)) u++; while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue)) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->matte != MagickFalse) { u=0; v=0; while (grays[u].opacity != ScaleQuantumToMap(p->opacity)) u++; while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity)) v++; cooccurrence[u][v].direction[i].opacity++; cooccurrence[v][u].direction[i].opacity++; } if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].index != ScaleQuantumToMap(indexes[x])) u++; while (grays[v].index != ScaleQuantumToMap(indexes[x+offset])) v++; cooccurrence[u][v].direction[i].index++; cooccurrence[v][u].direction[i].index++; } } p++; } } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { double normalize; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red/=normalize; cooccurrence[x][y].direction[i].green/=normalize; cooccurrence[x][y].direction[i].blue/=normalize; if (image->matte != MagickFalse) cooccurrence[x][y].direction[i].opacity/=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].index/=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BlueChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].opacity* cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].index* cooccurrence[x][y].direction[i].index; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) correlation.direction[i].opacity+=x*y* cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) correlation.direction[i].index+=x*y* cooccurrence[x][y].direction[i].index; /* Inverse Difference Moment. 
*/ channel_features[RedChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BlueChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->matte != MagickFalse) channel_features[OpacityChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_xy[y+x+2].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].index+= cooccurrence[x][y].direction[i].index; /* Entropy. */ channel_features[RedChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* log10(cooccurrence[x][y].direction[i].red+MagickEpsilon); channel_features[GreenChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* log10(cooccurrence[x][y].direction[i].green+MagickEpsilon); channel_features[BlueChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* log10(cooccurrence[x][y].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) channel_features[OpacityChannel].entropy[i]-= cooccurrence[x][y].direction[i].opacity* log10(cooccurrence[x][y].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].entropy[i]-= cooccurrence[x][y].direction[i].index* log10(cooccurrence[x][y].direction[i].index+MagickEpsilon); /* Information Measures of Correlation. 
*/ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_x[x].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].index+= cooccurrence[x][y].direction[i].index; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_y[y].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].index+= cooccurrence[x][y].direction[i].index; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->matte != MagickFalse) { mean.direction[i].opacity+=y*sum[y].direction[i].opacity; sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity; } if (image->colorspace == CMYKColorspace) { mean.direction[i].index+=y*sum[y].direction[i].index; sum_squares.direction[i].index+=y*y*sum[y].direction[i].index; } } /* Correlation: measure of linear-dependencies in the image. 
*/ channel_features[RedChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BlueChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->matte != MagickFalse) channel_features[OpacityChannel].correlation[i]= (correlation.direction[i].opacity-mean.direction[i].opacity* mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity- (mean.direction[i].opacity*mean.direction[i].opacity))*sqrt( sum_squares.direction[i].opacity-(mean.direction[i].opacity* mean.direction[i].opacity))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].correlation[i]= (correlation.direction[i].index-mean.direction[i].index* mean.direction[i].index)/(sqrt(sum_squares.direction[i].index- (mean.direction[i].index*mean.direction[i].index))*sqrt( sum_squares.direction[i].index-(mean.direction[i].index* mean.direction[i].index))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. 
*/ channel_features[RedChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BlueChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_average[i]+= x*density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_average[i]+= x*density_xy[x].direction[i].index; /* Sum entropy. */ channel_features[RedChannel].sum_entropy[i]-= density_xy[x].direction[i].red* log10(density_xy[x].direction[i].red+MagickEpsilon); channel_features[GreenChannel].sum_entropy[i]-= density_xy[x].direction[i].green* log10(density_xy[x].direction[i].green+MagickEpsilon); channel_features[BlueChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* log10(density_xy[x].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_entropy[i]-= density_xy[x].direction[i].opacity* log10(density_xy[x].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_entropy[i]-= density_xy[x].direction[i].index* log10(density_xy[x].direction[i].index+MagickEpsilon); /* Sum variance. 
*/ channel_features[RedChannel].sum_variance[i]+= (x-channel_features[RedChannel].sum_entropy[i])* (x-channel_features[RedChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenChannel].sum_variance[i]+= (x-channel_features[GreenChannel].sum_entropy[i])* (x-channel_features[GreenChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BlueChannel].sum_variance[i]+= (x-channel_features[BlueChannel].sum_entropy[i])* (x-channel_features[BlueChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_variance[i]+= (x-channel_features[OpacityChannel].sum_entropy[i])* (x-channel_features[OpacityChannel].sum_entropy[i])* density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_variance[i]+= (x-channel_features[IndexChannel].sum_entropy[i])* (x-channel_features[IndexChannel].sum_entropy[i])* density_xy[x].direction[i].index; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)* (y-mean.direction[i].opacity+1)* cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=(y-mean.direction[i].index+1)* (y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].index+= cooccurrence[x][y].direction[i].index; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* log10(cooccurrence[x][y].direction[i].red+MagickEpsilon); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* log10(cooccurrence[x][y].direction[i].green+MagickEpsilon); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* log10(cooccurrence[x][y].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) entropy_xy.direction[i].opacity-= cooccurrence[x][y].direction[i].opacity*log10( cooccurrence[x][y].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index* log10(cooccurrence[x][y].direction[i].index+MagickEpsilon); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* log10(density_x[x].direction[i].red*density_y[y].direction[i].red+ MagickEpsilon)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* log10(density_x[x].direction[i].green*density_y[y].direction[i].green+ MagickEpsilon)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* log10(density_x[x].direction[i].blue*density_y[y].direction[i].blue+ MagickEpsilon)); if (image->matte != MagickFalse) entropy_xy1.direction[i].opacity-=( cooccurrence[x][y].direction[i].opacity*log10( density_x[x].direction[i].opacity*density_y[y].direction[i].opacity+ MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].index-=( cooccurrence[x][y].direction[i].index*log10( density_x[x].direction[i].index*density_y[y].direction[i].index+ MagickEpsilon)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*log10(density_x[x].direction[i].red* density_y[y].direction[i].red+MagickEpsilon)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*log10(density_x[x].direction[i].green* density_y[y].direction[i].green+MagickEpsilon)); 
entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* density_y[y].direction[i].blue*log10(density_x[x].direction[i].blue* density_y[y].direction[i].blue+MagickEpsilon)); if (image->matte != MagickFalse) entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity* density_y[y].direction[i].opacity*log10( density_x[x].direction[i].opacity*density_y[y].direction[i].opacity+ MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].index-=(density_x[x].direction[i].index* density_y[y].direction[i].index*log10( density_x[x].direction[i].index*density_y[y].direction[i].index+ MagickEpsilon)); } } channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BlueChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->matte != MagickFalse) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].index; } /* Compute more texture features. */ (void) ResetMagickMemory(&variance,0,sizeof(variance)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->matte != MagickFalse) variance.direction[i].opacity+=density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=density_xy[x].direction[i].index; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->matte != MagickFalse) sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity* density_xy[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].index+=density_xy[x].direction[i].index* density_xy[x].direction[i].index; /* Difference entropy. */ channel_features[RedChannel].difference_entropy[i]-= density_xy[x].direction[i].red* log10(density_xy[x].direction[i].red+MagickEpsilon); channel_features[GreenChannel].difference_entropy[i]-= density_xy[x].direction[i].green* log10(density_xy[x].direction[i].green+MagickEpsilon); channel_features[BlueChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* log10(density_xy[x].direction[i].blue+MagickEpsilon); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_entropy[i]-= density_xy[x].direction[i].opacity* log10(density_xy[x].direction[i].opacity+MagickEpsilon); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_entropy[i]-= density_xy[x].direction[i].index* log10(density_xy[x].direction[i].index+MagickEpsilon); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* log10(density_x[x].direction[i].red+MagickEpsilon)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* log10(density_x[x].direction[i].green+MagickEpsilon)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* log10(density_x[x].direction[i].blue+MagickEpsilon)); if (image->matte != MagickFalse) entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity* log10(density_x[x].direction[i].opacity+MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].index-=(density_x[x].direction[i].index* log10(density_x[x].direction[i].index+MagickEpsilon)); entropy_y.direction[i].red-=(density_y[y].direction[i].red* log10(density_y[y].direction[i].red+MagickEpsilon)); entropy_y.direction[i].green-=(density_y[y].direction[i].green* log10(density_y[y].direction[i].green+MagickEpsilon)); entropy_y.direction[i].blue-=(density_y[y].direction[i].blue* log10(density_y[y].direction[i].blue+MagickEpsilon)); if (image->matte != MagickFalse) entropy_y.direction[i].opacity-=(density_y[y].direction[i].opacity* log10(density_y[y].direction[i].opacity+MagickEpsilon)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].index-=(density_y[y].direction[i].index* log10(density_y[y].direction[i].index+MagickEpsilon)); } /* Difference variance. 
*/ channel_features[RedChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BlueChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].opacity)- (variance.direction[i].opacity*variance.direction[i].opacity))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].index)- (variance.direction[i].index*variance.direction[i].index))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BlueChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/ (entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ? entropy_x.direction[i].opacity : entropy_y.direction[i].opacity); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/ (entropy_x.direction[i].index > entropy_y.direction[i].index ? entropy_x.direction[i].index : entropy_y.direction[i].index); channel_features[RedChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BlueChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity- entropy_xy.direction[i].opacity))))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index- entropy_xy.direction[i].index))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < 4; i++) { for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) ResetMagickMemory(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->matte != MagickFalse) pixel.direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; if (image->colorspace == CMYKColorspace) pixel.direction[i].index+=cooccurrence[x][y].direction[i].index; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->matte != MagickFalse) Q[z][y].direction[i].opacity+= cooccurrence[z][x].direction[i].opacity* cooccurrence[y][x].direction[i].opacity/ density_x[z].direction[i].opacity/ density_y[x].direction[i].opacity; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index* cooccurrence[y][x].direction[i].index/ density_x[z].direction[i].index/density_y[x].direction[i].index; } } channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red; channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green; 
channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue; if (image->matte != MagickFalse) channel_features[OpacityChannel].contrast[i]+=z*z* pixel.direction[i].opacity; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].contrast[i]+=z*z* pixel.direction[i].index; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BlueChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->matte != MagickFalse) channel_features[OpacityChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. */ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); }
H2Pack_ID_compress.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <float.h>

#include "H2Pack_config.h"
#include "H2Pack_aux_structs.h"
#include "utils.h"

// Swap the first len elements of two int arrays element by element
static inline void swap_int(int *x, int *y, int len)
{
    int tmp;
    for (int i = 0; i < len; i++)
    {
        tmp  = x[i];
        x[i] = y[i];
        y[i] = tmp;
    }
}

// Swap the first len elements of two DTYPE arrays element by element
static inline void swap_DTYPE(DTYPE *x, DTYPE *y, int len)
{
    DTYPE tmp;
    for (int i = 0; i < len; i++)
    {
        tmp  = x[i];
        x[i] = y[i];
        y[i] = tmp;
    }
}

// Partial pivoting QR decomposition, simplified output version
// The partial pivoting QR decomposition is of form:
//     A * P = Q * [R11, R12; 0, R22]
// where R11 is an upper-triangular matrix, R12 and R22 are dense matrices,
// P is a permutation matrix.
// Input parameters:
//   A        : Target matrix, stored in column major
//   tol_rank : QR stopping parameter, maximum column rank
//   tol_norm : QR stopping parameter, maximum column 2-norm
//   rel_norm : If tol_norm is relative to the largest column 2-norm in A
//   n_thread : Number of threads used in this function
//   QR_buff  : Size A->ncol, working buffer for partial pivoting QR
// Output parameters:
//   A : Matrix R: [R11, R12; 0, R22]
//   p : Matrix A column permutation array, A(:, p) = A * P
//   r : Dimension of upper-triangular matrix R11
void H2P_partial_pivot_QR(
    H2P_dense_mat_p A, const int tol_rank, const DTYPE tol_norm,
    const int rel_norm, int *p, int *r, const int n_thread, DTYPE *QR_buff
)
{
    DTYPE *R = A->data;
    int nrow = A->nrow;
    int ncol = A->ncol;
    int ldR  = A->ld;
    int max_iter = MIN(nrow, ncol);

    BLAS_SET_NUM_THREADS(n_thread);

    DTYPE *col_norm = QR_buff;

    // Precision-dependent thresholds: eps_t bounds the smallest meaningful
    // column norm, fast_norm_threshold_t decides when the fast norm
    // downdate (subtract-and-sqrt) is numerically safe
    DTYPE eps_t, fast_norm_threshold_t;
    if (sizeof(DTYPE) == 8)
    {
        eps_t = 1e-15;
        fast_norm_threshold_t = 1e-10;
    } else {
        eps_t = 1e-6;
        fast_norm_threshold_t = 1e-5;
    }

    // Initialize the permutation and find a column with largest 2-norm
    #pragma omp parallel for if (n_thread > 1) \
    num_threads(n_thread) schedule(static)
    for (int j = 0; j < ncol; j++)
    {
        p[j] = j;
        col_norm[j] = CBLAS_NRM2(nrow, R + j * ldR, 1);
    }
    DTYPE norm_p = 0.0;
    int pivot = 0;
    for (int j = 0; j < ncol; j++)
    {
        if (col_norm[j] > norm_p)
        {
            norm_p = col_norm[j];
            pivot  = j;
        }
    }

    // Scale the stopping norm
    int stop_rank   = MIN(max_iter, tol_rank);
    DTYPE norm_eps  = DSQRT((DTYPE) nrow) * eps_t;
    DTYPE stop_norm = MAX(norm_eps, tol_norm);
    if (rel_norm) stop_norm *= norm_p;

    int rank = -1;
    // Main iteration of Householder QR
    for (int i = 0; i < max_iter; i++)
    {
        // 1. Check the stop criteria
        if ((norm_p < stop_norm) || (i >= stop_rank))
        {
            rank = i;
            break;
        }

        // 2. Swap the pivot column (largest remaining 2-norm) to position i
        if (i != pivot)
        {
            swap_int(p + i, p + pivot, 1);
            swap_DTYPE(col_norm + i, col_norm + pivot, 1);
            swap_DTYPE(R + i * ldR, R + pivot * ldR, nrow);
        }

        // 3. Calculate Householder vector (normalized, sign chosen to
        //    avoid cancellation in h_vec[0])
        int h_len    = nrow - i;
        int h_len_m1 = h_len - 1;
        DTYPE *h_vec = R + i * ldR + i;
        DTYPE sign   = (h_vec[0] > 0.0) ? 1.0 : -1.0;
        DTYPE h_norm = CBLAS_NRM2(h_len, h_vec, 1);
        h_vec[0] = h_vec[0] + sign * h_norm;
        DTYPE inv_h_norm = 1.0 / CBLAS_NRM2(h_len, h_vec, 1);
        #pragma omp simd
        for (int j = 0; j < h_len; j++) h_vec[j] *= inv_h_norm;

        // 4. & 5. Householder update & column norm update
        DTYPE *R_block = R + (i + 1) * ldR + i;
        int R_block_nrow = h_len;
        int R_block_ncol = ncol - i - 1;
        #pragma omp parallel for if (n_thread > 1) \
        num_threads(n_thread) schedule(guided)
        for (int j = 0; j < R_block_ncol; j++)
        {
            int ji1 = j + i + 1;

            DTYPE *R_block_j = R_block + j * ldR;
            DTYPE h_Rj = 2.0 * CBLAS_DOT(R_block_nrow, h_vec, 1, R_block_j, 1);

            // 4. Orthogonalize columns right to the i-th column:
            //    R_block_j -= (2 * h_vec^T * R_block_j) * h_vec
            #pragma omp simd
            for (int k = 0; k < R_block_nrow; k++)
                R_block_j[k] -= h_Rj * h_vec[k];

            // 5. Update the (j+i+1)-th column's 2-norm
            if (col_norm[ji1] < stop_norm)
            {
                col_norm[ji1] = 0.0;
                continue;
            }
            DTYPE tmp = R_block_j[0] * R_block_j[0];
            tmp = col_norm[ji1] * col_norm[ji1] - tmp;
            if (tmp <= fast_norm_threshold_t)
            {
                // Downdated value is too small to trust (cancellation);
                // recompute the norm from scratch
                col_norm[ji1] = CBLAS_NRM2(h_len_m1, R_block_j + 1, 1);
            } else {
                // Fast update 2-norm when the new column norm is not so small
                col_norm[ji1] = DSQRT(tmp);
            }
        }

        // We don't need h_vec anymore, can overwrite the i-th column of R
        // with its final value: -sign * h_norm on the diagonal, zeros below
        h_vec[0] = -sign * h_norm;
        memset(h_vec + 1, 0, sizeof(DTYPE) * (h_len - 1));

        // Find next pivot
        pivot  = i + 1;
        norm_p = 0.0;
        for (int j = i + 1; j < ncol; j++)
        {
            if (col_norm[j] > norm_p)
            {
                norm_p = col_norm[j];
                pivot  = j;
            }
        }
    }
    if (rank == -1) rank = max_iter;

    *r = rank;
}

// H2P_partial_pivot_QR_kdim operated on column blocks for tensor kernel matrix.
// Each column block has kdim columns. In each step, we swap kdim columns
// and do kdim Householder orthogonalization.
// Size of QR_buff: (2*kdim+2)*A->nrow + (kdim+1)*A->ncol
// BLAS-3 approach, ref: https://doi.org/10.1145/1513895.1513904
void H2P_partial_pivot_QR_kdim(
    H2P_dense_mat_p A, const int kdim, const int tol_rank, const DTYPE tol_norm,
    const int rel_norm, int *p, int *r, const int n_thread, DTYPE *QR_buff
)
{
    DTYPE *R = A->data;
    int nrow = A->nrow;
    int ncol = A->ncol;
    int nblk = ncol / kdim;
    int ldR  = A->ld;
    int max_iter = MIN(nrow, ncol) / kdim;

    BLAS_SET_NUM_THREADS(n_thread);

    DTYPE eps_t, fast_norm_threshold_t;
    if (sizeof(DTYPE) == 8)
    {
        eps_t = 1e-15;
        fast_norm_threshold_t = 1e-10;
    } else {
        eps_t = 1e-6;
        fast_norm_threshold_t = 1e-5;
    }

    for (int j = 0; j < ncol; j++) p[j] = j;

    // Partition the work buffer (total size matches the header comment:
    // (2*kdim+2)*nrow + (kdim+1)*ncol)
    DTYPE *blk_norm = QR_buff;             // Size ncol: column norms, then folded per-block norms
    DTYPE *Vblk     = blk_norm + ncol;     // Size kdim*nrow: Householder vectors of current block
    DTYPE *Wblk     = Vblk + kdim * nrow;  // Size kdim*nrow: W factor of the compact WY update
    DTYPE *VV       = Wblk + kdim * nrow;  // Size nrow: scratch for V^T * v
    DTYPE *WVV      = VV + nrow;           // Size nrow: scratch for W * (V^T * v)
    DTYPE *WR       = WVV + nrow;          // Remaining kdim*ncol: scratch for W^T * R

    // Find a column with largest 2-norm as a scaling factor
    #pragma omp parallel for if(n_thread > 1) \
    num_threads(n_thread) schedule(static)
    for (int j = 0; j < ncol; j++)
        blk_norm[j] = CBLAS_NRM2(nrow, R + j * ldR, 1);

    // Find a column block with largest 2-norm as the first pivot;
    // each block norm folds its kdim column norms (sqrt of sum of squares)
    DTYPE norm_p = 0.0;
    int pivot = 0;
    for (int j = 0; j < nblk; j++)
    {
        DTYPE tmp = 0.0;
        for (int k = 0; k < kdim; k++)
        {
            int idx = kdim * j + k;
            tmp += blk_norm[idx] * blk_norm[idx];
        }
        blk_norm[j] = DSQRT(tmp);
        if (blk_norm[j] > norm_p)
        {
            norm_p = blk_norm[j];
            pivot  = j;
        }
    }

    // Scale the stopping norm
    int stop_rank   = MIN(max_iter, tol_rank/kdim);
    DTYPE norm_eps  = DSQRT((DTYPE) nrow) * eps_t;
    DTYPE stop_norm = MAX(norm_eps, tol_norm);
    if (rel_norm) stop_norm *= norm_p;

    // Main iteration of Householder QR (one column block per iteration)
    int rank = -1;
    for (int i = 0; i < max_iter; i++)
    {
        // 1. Check the stop criteria
        if ((norm_p < stop_norm) || (i >= stop_rank))
        {
            rank = i * kdim;
            break;
        }

        // 2. Swap the pivot column block to block position i
        if (i != pivot)
        {
            swap_int(p + i * kdim, p + pivot * kdim, kdim);
            swap_DTYPE(blk_norm + i, blk_norm + pivot, 1);
            DTYPE *R_i     = R + i * kdim * ldR;
            DTYPE *R_pivot = R + pivot * kdim * ldR;
            swap_DTYPE(R_i, R_pivot, ldR * kdim);
        }

        int VWblk_nrow = nrow - i * kdim;

        // 3. Do kdim times of consecutive Householder orthogonalize on the
        //    current column block
        for (int ii = i * kdim; ii < i * kdim + kdim; ii++)
        {
            // 3.1 Calculate Householder vector
            int h_len    = nrow - ii;
            DTYPE *h_vec = R + ii * ldR + ii;
            DTYPE sign   = (h_vec[0] > 0.0) ? 1.0 : -1.0;
            DTYPE h_norm = CBLAS_NRM2(h_len, h_vec, 1);
            h_vec[0] = h_vec[0] + sign * h_norm;
            DTYPE inv_h_norm = 1.0 / CBLAS_NRM2(h_len, h_vec, 1);
            #pragma omp simd
            for (int j = 0; j < h_len; j++) h_vec[j] *= inv_h_norm;

            // 3.2 Householder update current column block (only the
            //     remaining columns inside this block)
            int blk_i = ii - i * kdim;
            DTYPE *R_block = R + (ii + 1) * ldR + ii;
            int R_block_nrow = h_len;
            //int R_block_ncol = ncol - ii - 1;
            for (int j = 0; j < kdim - blk_i - 1; j++)
            {
                //int ji1 = j + ii + 1;
                DTYPE *R_block_j = R_block + j * ldR;
                DTYPE h_Rj = 2.0 * CBLAS_DOT(R_block_nrow, h_vec, 1, R_block_j, 1);
                // Orthogonalize columns right to the ii-th column
                #pragma omp simd
                for (int k = 0; k < R_block_nrow; k++)
                    R_block_j[k] -= h_Rj * h_vec[k];
            }

            // Save the Householder vector for block update
            // (zero-pad the top blk_i entries so all columns of Vblk share
            // the same VWblk_nrow leading dimension)
            for (int j = 0; j < blk_i; j++) Vblk[blk_i * VWblk_nrow + j] = 0.0;
            memcpy(Vblk + blk_i * VWblk_nrow + blk_i, h_vec, sizeof(DTYPE) * h_len);

            // We don't need h_vec anymore, can overwrite the i-th column of R
            h_vec[0] = -sign * h_norm;
            memset(h_vec + 1, 0, sizeof(DTYPE) * (h_len - 1));
        }

        // 4. Construct W, use V & W for block Householder update
        #pragma omp simd
        for (int j = 0; j < VWblk_nrow; j++) Wblk[j] = -2.0 * Vblk[j];
        for (int ii = 1; ii < kdim; ii++)
        {
            DTYPE *Vii = Vblk + ii * VWblk_nrow;
            DTYPE *Wii = Wblk + ii * VWblk_nrow;
            // VV = V(:, 0:ii-1)^T * V(:, ii)
            CBLAS_GEMV(
                CblasColMajor, CblasTrans, VWblk_nrow, ii,
                1.0, Vblk, VWblk_nrow, Vii, 1, 0.0, VV, 1
            );
            // WVV = W(:, 0:ii-1) * VV
            CBLAS_GEMV(
                CblasColMajor, CblasNoTrans, VWblk_nrow, ii,
                1.0, Wblk, VWblk_nrow, VV, 1, 0.0, WVV, 1
            );
            #pragma omp simd
            for (int j = 0; j < VWblk_nrow; j++) Wii[j] = -2.0 * (Vii[j] + WVV[j]);
        }
        int R_blk_scol = i * kdim + kdim;
        int R_blk_ncol = ncol - R_blk_scol;
        DTYPE *R_blk = R + R_blk_scol * ldR + (i * kdim);
        // Process the trailing columns in chunks sized so the GEMM working
        // set stays around 128 KB (per the variable name)
        int R_col_blk_128KB = 128 * 1024;
        R_col_blk_128KB /= (int) sizeof(DTYPE);
        R_col_blk_128KB /= (VWblk_nrow * kdim * 3);
        if (R_col_blk_128KB < 2) R_col_blk_128KB = 8;
        for (int R_col_offset = 0; R_col_offset < R_blk_ncol; R_col_offset += R_col_blk_128KB)
        {
            int R_col_blksize = R_col_blk_128KB;
            if (R_col_blksize + R_col_offset > R_blk_ncol) R_col_blksize = R_blk_ncol - R_col_offset;
            // WR = W^T * R_chunk
            CBLAS_GEMM(
                CblasColMajor, CblasTrans, CblasNoTrans, kdim, R_col_blksize, VWblk_nrow,
                1.0, Wblk, VWblk_nrow, R_blk + ldR * R_col_offset, ldR, 0.0, WR, kdim
            );
            // R_chunk += V * WR
            CBLAS_GEMM(
                CblasColMajor, CblasNoTrans, CblasNoTrans, VWblk_nrow, R_col_blksize, kdim,
                1.0, Vblk, VWblk_nrow, WR, kdim, 1.0, R_blk + ldR * R_col_offset, ldR
            );
        }

        // 5. Update the trailing column blocks' 2-norms
        #pragma omp parallel for if(n_thread > 1) \
        num_threads(n_thread) schedule(guided)
        for (int j = i + 1; j < nblk; j++)
        {
            if (blk_norm[j] < stop_norm)
            {
                blk_norm[j] = 0.0;
                continue;
            }
            // R(i * kdim, j * kdim + k)
            DTYPE *R_block = R + i * kdim + j * kdim * ldR;
            DTYPE tmp = blk_norm[j] * blk_norm[j];
            for (int k0 = 0; k0 < kdim; k0++)
            {
                for (int k1 = 0; k1 < kdim; k1++)
                {
                    int idx = k0 * ldR + k1;
                    tmp -= R_block[idx] * R_block[idx];
                }
            }
            if (tmp <= fast_norm_threshold_t)
            {
                // Downdated value is too small to trust (cancellation);
                // recompute the block norm from scratch
                const int nrm_len = nrow - (i + 1) * kdim;
                tmp = 0.0;
                for (int k = 0; k < kdim; k++)
                {
                    // R((i + 1) * kdim, j * kdim + k)
                    DTYPE *R_block_k = R + (j*kdim+k) * ldR + (i+1) * kdim;
                    DTYPE tmp1 = CBLAS_NRM2(nrm_len, R_block_k, 1);
                    tmp += tmp1 * tmp1;
                }
                blk_norm[j] = DSQRT(tmp);
            } else {
                // Fast update 2-norm when the new column norm is not so small
                blk_norm[j] = DSQRT(tmp);
            }
        }

        // 6. Find next pivot
        pivot  = i + 1;
        norm_p = 0.0;
        for (int j = i + 1; j < nblk; j++)
        {
            if (blk_norm[j] > norm_p)
            {
                norm_p = blk_norm[j];
                pivot  = j;
            }
        }
    }
    if (rank == -1) rank = max_iter * kdim;

    *r = rank;
}

// Partial pivoting QR for ID
// Input parameters:
//   A          : Target matrix, stored in column major
//   stop_type  : Partial QR stop criteria: QR_RANK, QR_REL_NRM, or QR_ABS_NRM
//   stop_param : Pointer to partial QR stop parameter
//   n_thread   : Number of threads used in this function
//   QR_buff    : Working buffer for partial pivoting QR. If kdim == 1, size A->ncol.
//                If kdim > 1, size (2*kdim+2)*A->nrow + (kdim+1)*A->ncol.
// kdim       : Dimension of tensor kernel's return (column block size)
// Output parameters:
//   A : Matrix R: [R11, R12]
//   p : Matrix A column permutation array, A(:, p) = A * P
void H2P_ID_QR(
    H2P_dense_mat_p A, const int stop_type, void *stop_param, int *p,
    const int n_thread, DTYPE *QR_buff, const int kdim
)
{
    // Parse partial QR stop criteria and perform partial QR
    int r, tol_rank = MIN(A->nrow, A->ncol), rel_norm = 1;
    DTYPE tol_norm = 1e-15;
    if (stop_type == QR_RANK)
    {
        // Stop when a target rank is reached
        int *param = (int*) stop_param;
        tol_rank = param[0];
        tol_norm = 1e-15;
        rel_norm = 1;
    }
    if (stop_type == QR_REL_NRM)
    {
        // Stop by relative column norm; scaled by sqrt(kdim) because the
        // blocked QR folds kdim column norms into one block norm
        DTYPE *param = (DTYPE*) stop_param;
        tol_rank = MIN(A->nrow, A->ncol);
        tol_norm = param[0] * DSQRT((DTYPE) kdim);
        rel_norm = 1;
    }
    if (stop_type == QR_ABS_NRM)
    {
        // Stop by absolute column norm
        DTYPE *param = (DTYPE*) stop_param;
        tol_rank = MIN(A->nrow, A->ncol);
        tol_norm = param[0];
        rel_norm = 0;
    }
    // Use H2P_partial_pivot_QR_kdim() for kdim == 1 also works,
    // but the performance is worse than H2P_partial_pivot_QR()
    if (kdim == 1)
    {
        H2P_partial_pivot_QR(
            A, tol_rank, tol_norm, rel_norm,
            p, &r, n_thread, QR_buff
        );
    } else {
        H2P_partial_pivot_QR_kdim(
            A, kdim, tol_rank, tol_norm, rel_norm,
            p, &r, n_thread, QR_buff
        );
    }
    // Special case: each column's 2-norm is smaller than the threshold
    if (r == 0)
    {
        for (int i = 0; i < A->ncol; i++) p[i] = i;
        A->nrow = 0;
        return;
    }
    // Truncate R to be [R11, R12] and return
    A->nrow = r;
}

// Quick sort two arrays in ascending order of key
// Input parameters:
//   key  : Key array
//   val  : Value array, permuted together with key
//   l, r : Sort range [l : r]
// Output parameters:
//   key : Sorted key array
//   val : Value array with the same permutation applied
static void H2P_qsort_key_value(int *key, int *val, const int l, const int r)
{
    int i = l, j = r, tmp;
    // Pivot: key at the middle of the current range
    int mid = key[(i + j) / 2];
    while (i <= j)
    {
        while (key[i] < mid) i++;
        while (key[j] > mid) j--;
        if (i <= j)
        {
            // Swap both the keys and their companion values
            tmp = key[i]; key[i] = key[j]; key[j] = tmp;
            tmp = val[i]; val[i] = val[j]; val[j] = tmp;
            i++; j--;
        }
    }
    // Recurse on the two partitions
    if (i < r) H2P_qsort_key_value(key, val, i, r);
    if (j > l) H2P_qsort_key_value(key, val, l, j);
}

// Interpolative Decomposition (ID) using partial QR over rows of a target
// matrix. Partial pivoting QR may need to be upgraded to SRRQR later.
void H2P_ID_compress(
    H2P_dense_mat_p A, const int stop_type, void *stop_param, H2P_dense_mat_p *U_,
    H2P_int_vec_p J, const int n_thread, DTYPE *QR_buff, int *ID_buff, const int kdim
)
{
    // 1. Partial pivoting QR for A^T
    // Note: A is stored in row major style but H2P_ID_QR needs A stored in column
    // major style. We manipulate the size information of A to save some time
    const int nrow = A->nrow;
    const int ncol = A->ncol;
    A->nrow = ncol;
    A->ncol = nrow;
    H2P_int_vec_set_capacity(J, nrow);
    H2P_ID_QR(A, stop_type, stop_param, J->data, n_thread, QR_buff, kdim);
    // Note: the output R stored in A is still stored in column major style
    H2P_dense_mat_p R = A;
    int r = A->nrow;  // Obtained rank
    J->length = r;

    // 2. Set permutation indices p0, sort the index subset J[0:r-1]
    int *p0 = ID_buff + 0 * nrow;
    int *p1 = ID_buff + 1 * nrow;
    int *i0 = ID_buff + 2 * nrow;
    int *i1 = ID_buff + 3 * nrow;
    for (int i = 0; i < nrow; i++)
    {
        p0[J->data[i]] = i;  // p0 = inverse of the QR column permutation
        i0[i] = i;
    }
    // Sort the first r skeleton indices ascending; i0 records where each
    // sorted entry came from
    H2P_qsort_key_value(J->data, i0, 0, r - 1);
    for (int i = 0; i < nrow; i++) i1[i0[i]] = i;      // i1 = inverse of i0
    for (int i = 0; i < nrow; i++) p1[i] = i1[p0[i]];  // p1 = composed row permutation for U

    // 3. Form the output U
    H2P_dense_mat_p U;
    if (r == 0)
    {
        // Special case: rank = 0, set U and J as empty
        H2P_dense_mat_init(&U, 0, 0);
        U->nrow = nrow;
        U->ncol = 0;
        U->ld   = 0;
        // NOTE(review): assumes H2P_dense_mat_init(&U, 0, 0) allocated no
        // data buffer that could leak here -- confirm against its definition
        U->data = NULL;
    } else {
        if (U_ != NULL)
        {
            // (1) Before permutation, the upper part of U is a diagonal
            H2P_dense_mat_init(&U, nrow, r);
            for (int i = 0; i < r; i++)
            {
                memset(U->data + i * r, 0, sizeof(DTYPE) * r);
                U->data[i * r + i] = 1.0;
            }
            DTYPE *R11 = R->data;
            DTYPE *R12 = R->data + r * R->ld;
            int nrow_R12 = r;
            int ncol_R12 = nrow - r;
            // (2) Solve E = inv(R11) * R12, stored in R12 in column major style
            // --> equals to what we need: E^T stored in row major style
            BLAS_SET_NUM_THREADS(n_thread);
            CBLAS_TRSM(
                CblasColMajor, CblasLeft, CblasUpper, CblasNoTrans, CblasNonUnit,
                nrow_R12, ncol_R12, 1.0, R11, R->ld, R12, R->ld
            );
            // (3) Reorder E^T's columns according to the sorted J
            DTYPE *UL = U->data + r * r;
            for (int icol = 0; icol < r; icol++)
            {
                DTYPE *R12_icol = R12 + i0[icol];
                DTYPE *UL_icol  = UL + icol;
                for (int irow = 0; irow < ncol_R12; irow++)
                    UL_icol[irow * r] = R12_icol[irow * R->ld];
            }
            // (4) Permute U's rows
            H2P_dense_mat_permute_rows(U, p1);
        }
    }

    // For tensor kernels (kdim > 1): J holds column indices in groups of
    // kdim; keep one entry per group and divide by kdim (presumably mapping
    // column indices back to point indices -- confirm against callers)
    if (kdim > 1)
    {
        J->data[0] /= kdim;
        for (int i = 1; i < J->length / kdim; i++)
            J->data[i] = J->data[i * kdim] / kdim;
        J->length /= kdim;
    }
    // U is assigned on every path where U_ != NULL
    if (U_ != NULL) *U_ = U;
}
DRB101-task-value-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */

/* wchar_t uses Unicode 10.0.0.  Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the
   following additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
   Produced at the Lawrence Livermore National Laboratory

   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
   Markus Schordan, and Ian Karlin
   (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
   schordan1@llnl.gov, karlin1@llnl.gov)

   LLNL-CODE-732144
   All rights reserved.

   This file is part of DataRaceBench. For details, see
   https://github.com/LLNL/dataracebench. Please also see the LICENSE
   file for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this
     software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
   LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ /* Cover an implicitly determined rule: In a task generating construct, * a variable without applicable rules is firstprivate. */ #include <stdio.h> int a[100]; int b[100]; void gen_task(int i) { a[i]=(i+1); return ; } int main() { int i = 0; int _ret_val_0; #pragma cetus private(i) #pragma loop name main#0 for (i=0; i<100; i ++ ) { gen_task(i); } /* checking control flow */ #pragma cetus private(i) #pragma loop name main#1 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<100; i ++ ) { /* assert (a[i]==i+1); */ if (a[i]!=(i+1)) { b[i]=a[i]; } } #pragma cetus private(i) #pragma loop name main#2 for (i=0; i<100; i ++ ) { printf("%d %d\n", a[i], b[i]); } _ret_val_0=0; return _ret_val_0; }
data.h
/*! * Copyright (c) 2015 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <rabit/rabit.h> #include <xgboost/base.h> #include <xgboost/span.h> #include <xgboost/host_device_vector.h> #include <memory> #include <numeric> #include <algorithm> #include <string> #include <utility> #include <vector> namespace xgboost { // forward declare learner. class LearnerImpl; // forward declare dmatrix. class DMatrix; /*! \brief data type accepted by xgboost interface */ enum DataType { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4 }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of rows in the data */ uint64_t num_row_{0}; /*! \brief number of columns in the data */ uint64_t num_col_{0}; /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_group_t> group_ptr_; /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ HostDeviceVector<bst_float> base_margin_; /*! \brief default constructor */ MetaInfo() = default; /*! * \brief Get weight of each instances. * \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! 
\brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! * \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. * \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); /*! * \brief Set information in the meta info with array interface. * \param key The key of the information. * \param interface_str String representation of json format array interface. * * [ column_0, column_1, ... column_n ] * * Right now only 1 column is permitted. */ void SetInfo(const char* key, std::string const& interface_str); private: /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_feature_t index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. 
*/
  Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief reversely compare feature values */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  // Two entries are equal only when both the feature index and the value match.
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};

/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id;
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin;
  /*! \brief Number of rows in a GPU batch, used for finding quantiles on GPU. */
  int gpu_batch_nrows;
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size;
  // Inequality compares every field; any difference means previously built
  // batches cannot be reused with the new parameters.
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
        gpu_batch_nrows != other.gpu_batch_nrows ||
        gpu_page_size != other.gpu_page_size;
  }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  // Row id of the first row held by this page (pages may be slices of a
  // larger matrix in external-memory mode).
  size_t base_rowid{};

  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;

  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i], static_cast<Inst::index_type>(size)};
  }

  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }

  /*! \return Number of instances in the page. */
  // offset always holds Size()+1 entries (Clear() pushes a leading 0),
  // so this subtraction cannot underflow.
  inline size_t Size() const {
    return offset.Size() - 1;
  }

  /*!
\return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! \brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } /*! \brief Set the base row id for this page. */ inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; } SparsePage GetTranspose(int num_columns) const; void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); #pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } } } /*! * \brief Push row block into the page. * \param batch the row batch. */ void Push(const dmlc::RowBlock<uint32_t>& batch); /*! * \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! * \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); }; class CSCPage: public SparsePage { public: CSCPage() : SparsePage() {} explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class SortedCSCPage : public SparsePage { public: SortedCSCPage() : SparsePage() {} explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class EllpackPageImpl; /*! * \brief A page stored in ELLPACK format. * * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid * including CUDA-specific implementation details in the header. */ class EllpackPage { public: /*! * \brief Default constructor. * * This is used in the external memory case. 
An empty ELLPACK page is constructed with its content * set later by the reader. */ EllpackPage(); /*! * \brief Constructor from an existing DMatrix. * * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix * in CSR format. */ explicit EllpackPage(DMatrix* dmat, const BatchParam& param); /*! \brief Destructor. */ ~EllpackPage(); /*! \return Number of instances in the page. */ size_t Size() const; /*! \brief Set the base row id for this page. */ void SetBaseRowId(size_t row_id); const EllpackPageImpl* Impl() const { return impl_.get(); } EllpackPageImpl* Impl() { return impl_.get(); } private: std::unique_ptr<EllpackPageImpl> impl_; }; template<typename T> class BatchIteratorImpl { public: virtual ~BatchIteratorImpl() = default; virtual T& operator*() = 0; virtual const T& operator*() const = 0; virtual void operator++() = 0; virtual bool AtEnd() const = 0; }; template<typename T> class BatchIterator { public: using iterator_category = std::forward_iterator_tag; explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); } void operator++() { CHECK(impl_ != nullptr); ++(*impl_); } T& operator*() { CHECK(impl_ != nullptr); return *(*impl_); } const T& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator& rhs) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } private: std::shared_ptr<BatchIteratorImpl<T>> impl_; }; template<typename T> class BatchSet { public: explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {} BatchIterator<T> begin() { return begin_iter_; } BatchIterator<T> end() { return BatchIterator<T>(nullptr); } private: BatchIterator<T> begin_iter_; }; /*! * \brief This is data structure that user can pass to DMatrix::Create * to create a DMatrix for training, user can create this data structure * for customized Data Loading on single machine. 
* * On distributed setting, usually an customized dmlc::Parser is needed instead. */ template<typename T> class DataSource : public dmlc::DataIter<T> { public: /*! * \brief Meta information about the dataset * The subclass need to be able to load this correctly from data. */ MetaInfo info; }; /*! * \brief Internal data structured used by XGBoost during training. * There are two ways to create a customized DMatrix that reads in user defined-format. * * - Provide a dmlc::Parser and pass into the DMatrix::Create * - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by * DMLC_REGISTER_DATA_PARSER; * - This works best for user defined data input source, such as data-base, filesystem. * - Provide a DataSource, that can be passed to DMatrix::Create * This can be used to re-use inmemory data structure into DMatrix. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! \brief meta information of the dataset */ virtual MetaInfo& Info() = 0; /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /** * \brief Gets batches. Use range based for loop over BatchSet to access individual batches. */ template<typename T> BatchSet<T> GetBatches(const BatchParam& param = {}); // the following are column meta data, should be able to answer them fast. /*! \return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief get column density */ virtual float GetColDensity(size_t cidx) = 0; /*! \brief virtual destructor */ virtual ~DMatrix() = default; /*! * \brief Save DMatrix to local file. * The saved file only works for non-sharded dataset(single machine training). * This API is deprecated and dis-encouraged to use. * \param fname The file name to be saved. * \return The created DMatrix. */ virtual void SaveToLocalFile(const std::string& fname); /*! \brief Whether the matrix is dense. 
*/ bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; } /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. * \param file_format The format type of the file, used for dmlc::Parser::Create. * By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", size_t page_size = kPageSize); /*! * \brief create a new DMatrix, by wrapping a row_iterator, and meta info. * \param source The source iterator of the data, the create function takes ownership of the source. * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode. * This can be nullptr for common cases, and in-memory mode will be used. * \return a Created DMatrix. */ static DMatrix* Create(std::unique_ptr<DataSource<SparsePage>>&& source, const std::string& cache_prefix = ""); /** * \brief Creates a new DMatrix from an external data adapter. * * \tparam AdapterT Type of the adapter. * \param adapter View onto an external data. * \param missing Values to count as missing. * \param nthread Number of threads for construction. * * \return a Created DMatrix. */ template <typename AdapterT> static DMatrix* Create(AdapterT* adapter, float missing, int nthread); /*! * \brief Create a DMatrix by loading data from parser. * Parser can later be deleted after the DMatrix i created. * \param parser The input data parser * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode. * This can be nullptr for common cases, and in-memory mode will be used. * \param page_size Page size for external memory. 
 * \sa dmlc::Parser
 * \note dmlc-core provides efficient distributed data parser for libsvm format.
 *  User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
 *  See "dmlc-core/include/dmlc/data.h" for detail.
 * \return A created DMatrix.
 */
  static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
                         const std::string& cache_prefix = "",
                         size_t page_size = kPageSize);

  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  // Implemented by each concrete DMatrix; every getter returns a lazily
  // evaluated range of pages in the corresponding storage format.
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
};

// Map each page type requested through GetBatches<T>() onto the matching
// protected virtual; only the EllpackPage variant uses the BatchParam.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}

template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
}  // namespace xgboost

namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif  // XGBOOST_DATA_H_
ofmo-integ.c
/** * @file ofmo-integ.c * Hartree-Fock分子軌道法、および、それに基づいたFMO法で * 必要となる各種分子積分を行うための最上位関数群を定義している * ファイル。 * */ /** * @defgroup integ 分子積分クラス * 通常のHartree-Fock(HF)分子軌道計算で用いる1電子積分(運動エネルギー * 積分、核-引力積分、重なり積分)や2電子積分だけでなく、FMO法に出現する * 4中心(3中心、2中心)の各種クーロン積分、および、カットオフテーブル * を作成する関数などを定義している。 * * @ingroup ofmo * * */ /** * @defgroup integ-top 分子積分の最上位クラス * @brief 積分計算を必要とする関数から呼ばれる最上位関数 * @ingroup integ * */ /** * @defgroup integ-med 同じタイプの積分をまとめて行う関数群 * @brief 同じタイプの積分をまとめて計算するための関数クラス * @ingroup integ * */ /** * @defgroup integ-core 1つの縮約積分を計算する関数クラス * @brief 1つの縮約積分を計算するための関数クラス * @ingroup integ * */ /** * @defgroup integ-misc 雑多な処理を行う関数クラス * @brief 積分計算以外の処理を行う関数クラス * @ingroup integ * */ /** * @defgroup integ-fmt 誤差関数計算を行う関数クラス * @brief 分子積分で必要となる誤差関数の計算を行う関数クラス * @ingroup integ * */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "ofmo-cutoff.h" #include "ofmo-ifc2c.h" #include "ofmo-oneint.h" #include "ofmo-twoint.h" #include "ofmo-twoint-buffer.h" #include "ofmo-twoint-direct.h" #include "ofmo-ifc4c.h" #include "ofmo-ifc3c.h" #include "fmt.h" #include "fmt-m.h" #include "ofmo-rys-xxxx.h" #include "ofmo-os-xxxx.h" #include "ofmo-def.h" #include "ofmo-prof.h" #include "ofmo-parallel.h" #include "ofmo-tlog.h" #ifdef OFMO_SKELETON #include "rhf/skel-w2e.h" #else #define start_w2e() #define set_w2e(Labcd) #endif double x_coef; //DFT; HF exchange coef. 
//#define Free(a) if ( a != NULL ) free( a ); a = NULL extern int ofmo_twoint_xxxx( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for partially direct SCF const long *pebuf_max_nzeri, long *ebuf_non_zero_eri, double ebuf_val[], short int ebuf_ind4[], int *last_ijcs, int *last_klcs ); extern int ofmo_twoint_direct_xxxx( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for direct SCF const long *petmp_max_nzeri, long *petmp_non_zero_eri, double etmp_val[], short int etmp_ind4[], const int *plast_ijcs, const int *plast_klcs, // density matrix & G-matrix data const int *pnao, const double Ds[], double G[] ); extern int ofmo_twoint_rys_xxxx( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for partially direct SCF const long *pebuf_max_nzeri, long *ebuf_non_zero_eri, double ebuf_val[], short int ebuf_ind4[], int *last_ijcs, int *last_klcs ); 
extern int ofmo_twoint_direct_rys_xxxx( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for direct SCF const long *petmp_max_nzeri, long *petmp_non_zero_eri, double etmp_val[], short int etmp_ind4[], const int *plast_ijcs, const int *plast_klcs, // density matrix & G-matrix data const int *pnao, const double Ds[], double G[] ); extern int ofmo_OS_integ_init( const int maxlqn ); extern int ofmo_oneint_gen_init( const int maxlqn ); extern int ofmo_Rys_integ_init( const int maxlqn ); /*// for Fortran code extern void fmt4_initialize_(); extern void fmt4_gen_initialize_(); extern void fort_const_();*/ #ifdef USE_CUDA #include "cuda/cudalib.h" #include "cuda/cuda-integ.h" #include "cuda/cuda-ifc4c.h" #include "cuda/cuda-ifc4c-calc.h" extern int cuda_fmt_initialize(); #endif /* ==================================================== 負荷分散などのための制御変数に関係する関数群 ==================================================== */ // to control load-balancing static int *target_type = NULL; static size_t *loop_offset = NULL; static int OFMO_MAX_THREADS = 1; static void finalize_ctrl() { Free( target_type ); Free( loop_offset ); OFMO_MAX_THREADS = 1; } static int init_ctrl() { static int called = false; int maxthreads, i; if ( !called ) { maxthreads = omp_get_max_threads(); target_type = (int*)malloc( sizeof(int) * maxthreads ); loop_offset = (size_t*)malloc( sizeof(size_t) * maxthreads ); OFMO_MAX_THREADS = maxthreads; for ( i=0; i<maxthreads; i++ ) { target_type[i] = -1; loop_offset[i] = 0; } atexit( finalize_ctrl ); called = true; } return 0; } size_t ofmo_integ_get_loop_offset( const int mythread ) { return 
loop_offset[mythread]; }

/* Record the loop offset for a thread (load-balancing bookkeeping). */
void ofmo_integ_set_loop_offset( const int mythread, const size_t offset ) {
    loop_offset[mythread] = offset;
}

/* Set the integral type currently targeted by a thread. */
void ofmo_integ_set_target_type( const int mythread, const int ttype ) {
    target_type[mythread] = ttype;
}

/* Read back the integral type targeted by a thread. */
static int ofmo_integ_get_target_type( const int mythread ) {
    return target_type[mythread];
}

/* One-time initialization of every integral sub-module (error-function
 * tables, two-electron code, cutoff tables, one-electron code, ...).
 * Idempotent: guarded by a function-local flag, so repeated calls are
 * no-ops.  maxlqn is the maximum orbital angular momentum used. */
int ofmo_integ_init( int maxlqn ) {
    static int called = false;
    if ( called == false ) {
        init_ctrl();    // added for load-balancing
        fmt_initialize( maxlqn );
        fmt_m_init();
#ifdef USE_CUDA
        int ret = cuda_fmt_initialize();
        if (ret<0) exit(1);
#endif
        ofmo_twoint_init();
        ofmo_ifc3c_os_init();
        ofmo_ifc3c_rys_init();
        ofmo_ifc2c_init();
        ofmo_oneint_init();
        ofmo_cutoff_init();
        // ofmo_OS_integ_init( maxlqn );
        ofmo_oneint_gen_init( maxlqn );
        // ofmo_Rys_integ_init( maxlqn );
        called = true;
    }
    return 0;
}

extern int ofmo_cutoff_xx(
        // input arguments
        const int *pLa, const int *pLb, const int leading_cs[],
        const int shel_tem[], const int shel_atm[], const int shel_add[],
        const double atom_x[], const double atom_y[], const double atom_z[],
        const double prim_exp[], const double prim_coe[],
        // output arguments
        int leading_cs_pair[], double csp_schwarz[],
        int csp_ics[], int csp_jcs[], int csp_leading_ps_pair[],
        double psp_zeta[], double psp_dkps[], double psp_xiza[] );

/* top level code for making cutoff table */
/* Dispatch table indexed by CS-pair type: entry 0 is the specialized
 * (ss) routine, every other entry falls back to the generic
 * ofmo_cutoff_xx implementation. */
static int (*calc_schwarz[]) (
        const int *pLa, const int *pLb, const int leading_cs[],
        const int shel_tem[], const int shel_atm[], const int shel_add[],
        const double atom_x[], const double atom_y[], const double atom_z[],
        const double prim_exp[], const double prim_coe[],
        int leading_cs_pair[], double csp_schwarz[],
        int csp_ics[], int csp_jcs[], int csp_leading_ps_pair[],
        double psp_zeta[], double psp_dkps[], double psp_xiza[]) = {
    ofmo_cutoff_ss_,
    /*ofmo_cutoff_ps_, ofmo_cutoff_pp_, ofmo_cutoff_ds_, ofmo_cutoff_dp_,
    ofmo_cutoff_dd_,*/
    ofmo_cutoff_xx, ofmo_cutoff_xx, ofmo_cutoff_xx, ofmo_cutoff_xx,
    ofmo_cutoff_xx
};

/**
Schwarzの不等式を用いるためのカットオフテーブル作成関数 * @ingroup integ-top * * Schwarzの不等式を用いたカットオフテーブルを作成する。 * ソート基底関数データを与えると、Schwarzの不等式を用いるための * カットオフテーブルが計算されて返ってくる。 * * @attention * @li 出力用の各種配列は、呼び出し時には確保されている必要がある。 * @li 出力用各種配列(\c csp_schwarz[] や \c csp_ics[] など)が複数 * スレッド間で共有されていなければ、スレッドセーフである。 * * @param[in] maxlqn 最大軌道量子数 * @param[in] leading_cs[lqn] 軌道量子数 \c lqn の先頭CS番号 * @param[in] shel_tem[ics] CS番号 \c ics のCSの縮約長 * @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_add[ics] CS番号 \c ics のCSに含まれるPSの先頭PS番号 * @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位) * @param[in] prim_exp[ips] PS番号 \c ips のPSの軌道指数 * @param[in] prim_coe[ips] PS番号 \c ips のPSの規格化定数込みの縮約係数 * * @param[out] leading_cs_pair[itype] CSペアタイプ番号 \c itype の * 先頭CSペア番号 * @param[out] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分 * @param[out] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号 * @param[out] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。 * @param[out] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる * PSペアの先頭PSペア番号 * @param[out] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和 * \f$ \zeta = \zeta_a + \zeta_b \f$ * @param[out] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数 * \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b} * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b} * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2 * \right]\f] * @param[out] psp_xiza[ipsp] PSペア番号 \c ipsp の * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$ * * @retval 0 正常終了 * @retval -1 異常終了(サポートしていない積分タイプがあったなど) * * */ int ofmo_cutoff_make_table( // input arguments const int maxlqn, const int leading_cs[], const int shel_tem[], const int shel_atm[], const int shel_add[], const double atom_x[], const double atom_y[], const double atom_z[], const double prim_exp[], const double prim_coe[], // output arguments int leading_cs_pair[], double csp_schwarz[], int csp_ics[], 
int csp_jcs[], int csp_leading_ps_pair[],
        double psp_zeta[], double psp_dkps[], double psp_xiza[] ) {
    int La, Lb, Lab;
    /* Anchor both pair-index tables at zero before filling them. */
    leading_cs_pair[0] = 0;
    csp_leading_ps_pair[0] = 0;
    /* Visit every unique angular-momentum pair with La >= Lb. */
    for ( La=0; La<=maxlqn; La++ ) {
        for ( Lb=0; Lb<=La; Lb++ ) {
            Lab = La*(La+1)/2 + Lb;   /* canonical pair-type index */
            if ( Lab == 0 ) {
                /* (ss) pairs use the specialized routine. */
                ofmo_cutoff_ss_( &La, &Lb, leading_cs, shel_tem, shel_atm,
                        shel_add, atom_x, atom_y, atom_z, prim_exp, prim_coe,
                        leading_cs_pair, csp_schwarz, csp_ics, csp_jcs,
                        csp_leading_ps_pair, psp_zeta, psp_dkps, psp_xiza );
            } else {
                /* All higher pairs go through the generic routine. */
                ofmo_cutoff_xx( &La, &Lb, leading_cs, shel_tem, shel_atm,
                        shel_add, atom_x, atom_y, atom_z, prim_exp, prim_coe,
                        leading_cs_pair, csp_schwarz, csp_ics, csp_jcs,
                        csp_leading_ps_pair, psp_zeta, psp_dkps, psp_xiza );
            }
#ifdef SORT_CSP
            ofmo_cutoff_sort_( La, Lb, leading_cs, shel_tem, shel_atm,
                    shel_add, atom_x, atom_y, atom_z, prim_exp, prim_coe,
                    leading_cs_pair, csp_schwarz, csp_ics, csp_jcs,
                    csp_leading_ps_pair, psp_zeta, psp_dkps, psp_xiza );
#endif
        }       // Lb
    }   // La
    return 0;
}

/* Sum of the first n elements of an int array (used below to add up the
 * atomic numbers of a fragment). */
static int isum( const int n, const int a[] ) {
    int i, sum;
    for ( i=0, sum=0; i<n; i++ ) sum += a[i];
    return sum;
}

extern int ofmo_oneint_xx(
        const int *pnworkers, const int *pworkerid,
        const int *pLa, const int *pLb,
        const int leading_cs[],
        const int shel_tem[], const int shel_atm[], const int shel_add[],
        const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[],
        const double prim_exp[], const double prim_coe[],
        const int *pnat, const int atomic_number[],
        double S[], double H[] );

/* Dispatch table for one-electron integral routines, indexed by the
 * canonical pair type Lab = La*(La+1)/2 + Lb. */
static int (*oneint_func[]) (
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb,
        // basis set data for fragment
        const int leading_cs[],
        const int shel_tem[], const int shel_atm[], const int shel_add[],
        const int shel_ini[],
        const double atom_x[], const double atom_y[],
        const double atom_z[],
        const double prim_exp[], const double prim_coe[],
        const int *pnat, const int atomic_number[],
        double S[], double H[] ) = {
    /*ofmo_oneint_ss_,
ofmo_oneint_ps_, ofmo_oneint_pp_, ofmo_oneint_ds_, ofmo_oneint_dp_, ofmo_oneint_dd_,*/ ofmo_oneint_ss__, ofmo_oneint_ps__, ofmo_oneint_pp__, /*ofmo_oneint_xx, ofmo_oneint_xx, ofmo_oneint_xx, ofmo_oneint_xx, ofmo_oneint_xx,*/ ofmo_oneint_ds__, ofmo_oneint_dp__, ofmo_oneint_dd__, }; /** ソートされた一電子積分計算関数 * @ingroup integ-top * * ソート基底関数を与えると、一電子積分を計算して * ソートされた重なり積分、一電子ハミルトン行列を返す。 * * @attention * @li 出力用の各種配列は、呼び出し時には確保されている必要がある * @li スレッド並列時の関数呼び出しは、スレッド並列領域内から行う必要 * がある * @li 1プロセスで実行する場合には、関数終了時点で(スレッド * 並列化時には全スレッドが関数から返った時点で)完全な重なり行列や * 一電子ハミルトン行列が得られる。 * @li 複数プロセスで実行する場合には、関数終了時点では部分の結果しか * 得られていない。完全な結果を得るためには、\c MPI_Allreduce などの * 関数を用いたリダクション処理が必要である。 * * @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数 * @param[in] workerid 各ワーカプロセス(スレッド)のID。 * \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。 * @param[in] maxlqn 最大軌道量子数 * @param[in] leading_cs[lqn] 軌道量子数 \c lqn の先頭CS番号 * @param[in] shel_tem[ics] CS番号 \c ics のCSの縮約長 * @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_add[ics] CS番号 \c ics のCSに含まれるPSの先頭PS番号 * @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位) * @param[in] prim_exp[ips] PS番号 \c ips のPSの軌道指数 * @param[in] prim_coe[ips] PS番号 \c ips のPSの規格化定数込みの縮約係数 * * @param[in] nat 原子数 * @param[in] atomic_number[iat] 原子の番号 \c iat の原子番号 * * @param[out] S[] 重なり行列(圧縮"U"形式)。 * @param[out] H[] 一電子ハミルトン行列(圧縮"U"形式)。 * * @retval 0 正常終了 * @retval -1 異常終了(いま(2011/06/13)のところ考えていない) * */ int ofmo_integ_oneint_sorted( const int nworkers, const int workerid, const int maxlqn, const int leading_cs[], const int shel_tem[], const int shel_atm[], const int shel_add[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const double prim_exp[], const double prim_coe[], const int nat, const int atomic_number[], double S[], double H[]) { int La, Lb, Lab, sum; //double dsum; sum = isum( 
nat, atomic_number ); ofmo_oneint_set_sum_atomic_numbers( sum ); for ( La=0; La<=maxlqn; La++ ) { for ( Lb=0; Lb<=La; Lb++ ) { Lab = La*(La+1)/2+Lb; if ( Lab == 0 ) { ofmo_oneint_ss__( &nworkers, &workerid, &La, &Lb, leading_cs, shel_tem, shel_atm, shel_add, shel_ini, atom_x, atom_y, atom_z, prim_exp, prim_coe, &nat, atomic_number, S, H ); } else { ofmo_oneint_xx( &nworkers, &workerid, &La, &Lb, leading_cs, shel_tem, shel_atm, shel_add, shel_ini, atom_x, atom_y, atom_z, prim_exp, prim_coe, &nat, atomic_number, S, H ); } } } return 0; } static int (*calc_twoint_buffer[])( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for partially direct SCF const long *pebuf_max_nzeri, long *ebuf_non_zero_eri, double ebuf_val[], short int ebuf_ind4[], int *last_ijcs, int *last_klcs ) = { ofmo_twoint_buffer_ssss__, // (SS,SS)はすべて同じコードを使用 /*// original (OS, 個別、d関数は元の並び) ofmo_twoint_buffer_psss__, ofmo_twoint_buffer_psps__, ofmo_twoint_buffer_ppss__, ofmo_twoint_buffer_ppps__, ofmo_twoint_buffer_pppp__, ofmo_twoint_buffer_dsss__, ofmo_twoint_buffer_dsps__, ofmo_twoint_buffer_dspp__, ofmo_twoint_buffer_dsds__, ofmo_twoint_buffer_dpss__, ofmo_twoint_buffer_dpps__, ofmo_twoint_buffer_dppp__, ofmo_twoint_buffer_dpds__, ofmo_twoint_buffer_dpdp__, ofmo_twoint_buffer_ddss__, ofmo_twoint_buffer_ddps__, ofmo_twoint_buffer_ddpp__, ofmo_twoint_buffer_ddds__, ofmo_twoint_buffer_dddp__, ofmo_twoint_buffer_dddd__,*/ /*// Obara-Saika(一般式、C言語) ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, 
ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx, ofmo_twoint_xxxx,*/ // Obara-Saika(個別、C言語) ofmo_twoint_os_psss, ofmo_twoint_os_psps, ofmo_twoint_os_ppss, ofmo_twoint_os_ppps, ofmo_twoint_os_pppp, ofmo_twoint_os_dsss, ofmo_twoint_os_dsps, ofmo_twoint_os_dspp, ofmo_twoint_os_dsds, ofmo_twoint_os_dpss, ofmo_twoint_os_dpps, ofmo_twoint_os_dppp, ofmo_twoint_os_dpds, ofmo_twoint_os_dpdp, ofmo_twoint_os_ddss, ofmo_twoint_os_ddps, ofmo_twoint_os_ddpp, ofmo_twoint_os_ddds, ofmo_twoint_os_dddp, ofmo_twoint_os_dddd, /*// Rys求積法(一般式、C言語) ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx, ofmo_twoint_rys_xxxx,*/ /* // Rsy求積法(個別、C言語) ofmo_twoint_rys_psss, ofmo_twoint_rys_psps, ofmo_twoint_rys_ppss, ofmo_twoint_rys_ppps, ofmo_twoint_rys_pppp, ofmo_twoint_rys_dsss, ofmo_twoint_rys_dsps, ofmo_twoint_rys_dspp, ofmo_twoint_rys_dsds, ofmo_twoint_rys_dpss, ofmo_twoint_rys_dpps, ofmo_twoint_rys_dppp, ofmo_twoint_rys_dpds, ofmo_twoint_rys_dpdp, ofmo_twoint_rys_ddss, ofmo_twoint_rys_ddps, ofmo_twoint_rys_ddpp, ofmo_twoint_rys_ddds, ofmo_twoint_rys_dddp, ofmo_twoint_rys_dddd,*/ }; static int (*calc_twoint_direct[])( // paralleization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis set & cutoff table data const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int 
csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // concerned about buffered direct method const long *petmp_max_nzeri, long *petmp_non_zero_eri, double etmp_val[], short int etmp_ind4[], const int *plast_ijcs, const int *plast_klcs, // density matrix & G-matrix data const int *pnao, const double Ds[], double G[] ) = { ofmo_twoint_direct_ssss__, /*// Obara-Saika式(一般式、C言語) ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx, ofmo_twoint_direct_xxxx,*/ // Obara-Saika式(個別、C言語) ofmo_twoint_direct_os_psss, ofmo_twoint_direct_os_psps, ofmo_twoint_direct_os_ppss, ofmo_twoint_direct_os_ppps, ofmo_twoint_direct_os_pppp, ofmo_twoint_direct_os_dsss, ofmo_twoint_direct_os_dsps, ofmo_twoint_direct_os_dspp, ofmo_twoint_direct_os_dsds, ofmo_twoint_direct_os_dpss, ofmo_twoint_direct_os_dpps, ofmo_twoint_direct_os_dppp, ofmo_twoint_direct_os_dpds, ofmo_twoint_direct_os_dpdp, ofmo_twoint_direct_os_ddss, ofmo_twoint_direct_os_ddps, ofmo_twoint_direct_os_ddpp, ofmo_twoint_direct_os_ddds, ofmo_twoint_direct_os_dddp, ofmo_twoint_direct_os_dddd, /*// Rys求積法(一般式、C言語) ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, 
ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx, ofmo_twoint_direct_rys_xxxx,*/ /*// Rys求積法(個別、C言語) ofmo_twoint_direct_rys_psss, ofmo_twoint_direct_rys_psps, ofmo_twoint_direct_rys_ppss, ofmo_twoint_direct_rys_ppps, ofmo_twoint_direct_rys_pppp, ofmo_twoint_direct_rys_dsss, ofmo_twoint_direct_rys_dsps, ofmo_twoint_direct_rys_dspp, ofmo_twoint_direct_rys_dsds, ofmo_twoint_direct_rys_dpss, ofmo_twoint_direct_rys_dpps, ofmo_twoint_direct_rys_dppp, ofmo_twoint_direct_rys_dpds, ofmo_twoint_direct_rys_dpdp, ofmo_twoint_direct_rys_ddss, ofmo_twoint_direct_rys_ddps, ofmo_twoint_direct_rys_ddpp, ofmo_twoint_direct_rys_ddds, ofmo_twoint_direct_rys_dddp, ofmo_twoint_direct_rys_dddd,*/ }; /** @example oneint-serial.c * 1電子積分関数のシリアル実行時のコード例。 * */ /** @example oneint-mt.c * 1電子積分関数のスレッド並列実行時のコード例。 * * 関数は、スレッド並列領域内から呼ばれている。 * 関数が終了して全スレッドで同期がとれた時点で、完全な行列が得られる。 * */ /** @example oneint-mpi.c * 1電子積分関数のフラットMPI並列時のコード例。 * * 関数呼び出し後にリダクション処理を行うことで、完全な行列が得られる。 * */ /** @example oneint-hybrid.c * 1電子積分関数のOpenMPとMPIによるハイブリッド並列化の例。 * * フラットMPIの場合と同様に、関数呼び出し後にリダクション処理を行う * ことで、完全な行列が得られる。 * * */ /** 電子反発積分(二電子積分)計算関数(1) * @ingroup integ-top * * 指定されたサイズのバッファに入るだけの二電子積分を計算、保存する。 * バッファが一杯になったら、それ以降の積分計算は行わない。また、 * 二電子ハミルトン行列(G行列)の計算は行わない。 * * @attention * @li G行列生成関数 \c ofmo_integ_gen_gmat を呼び出す前に、一回、 * 呼び出す必要がある。この関数を呼ばずに \c ofmo_integ_gen_gmat 関数 * を呼び出すと、結果がおかしくなる場合がある。 * @li FMO計算のように一回の実行で複数回のSCF計算を行う場合には、 * SCF計算を行う度に、一度、呼び出す必要がある。 * @li OpenMPによるスレッド並列化が行われている。スレッド並列実行を * 行うためには、この関数をスレッド並列領域内で呼び出す必要がある。 * @li 適切に\c nworkers と \c workerid を設定すれば、MPIと組み合わせた * ハイブリッド並列化にも対応している。 * @li 二電子積分を保存するためのバッファは、関数内部で確保される。また、 * 確保したバッファは、プログラム終了時に開放される。 * * @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数 * @param[in] workerid 各ワーカプロセス(スレッド)のID。 * \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。 * @param[in] ebuf_buffer_size_mb 二電子積分の値を保存するための * バッファサイズ(MB単位)。このバッファには、積分の値だけでなく * 4つの添字も保存される。 * @param[in] maxlqn 最大軌道量子数 * @param[in] shel_atm[ics] CS番号 \c ics 
のCSが属する原子の番号 * @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位) * @param[in] leading_cs_pair[itype] CSペアタイプ番号 \c itype の * 先頭CSペア番号 * @param[in] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分 * @param[in] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号 * @param[in] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。 * @param[in] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる * PSペアの先頭PSペア番号 * @param[in] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和 * \f$ \zeta = \zeta_a + \zeta_b \f$ * @param[in] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数 * \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b} * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b} * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2 * \right]\f] * @param[in] psp_xiza[ipsp] PSペア番号 \c ipsp の * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$ * * * @retval 0 すべての積分がバッファに保存されて終了(in core SCF) * @retval 1 バッファが一杯になって終了(partially direct SCF) * * */ int ofmo_integ_twoint_first( // parallelization const int nworkers, const int workerid, // buffer size const size_t ebuf_buffer_size_mb, // basis set & cutoff table data const int maxlqn, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[] ) { int La, Lb, Lc, Ld, Lab, Lcd, Labcd; int mythread = 0, ret_code; int last_ijcs, last_klcs; long ebuf_non_zero_eri, ebuf_max_nzeri; short int *ebuf_ind4; double *ebuf_eri; // debug char *CS = "spdfg"; // added for load-balancing int local_id=workerid; size_t offset; mythread = omp_get_thread_num(); ebuf_max_nzeri = (long) ofmo_twoint_set_buffer_size( mythread, 
ebuf_buffer_size_mb );
/* Per-thread buffer for integral values and their four short-int AO indices */
ebuf_ind4 = ofmo_twoint_get_ebuf_ind4( mythread );
ebuf_eri = ofmo_twoint_get_ebuf_eri( mythread );
last_ijcs = last_klcs = -1;
ebuf_non_zero_eri = 0;
// added for load-balancing
//offset = ofmo_integ_get_loop_offset( mythread );
offset = 0;
// initialize
ofmo_twoint_set_last_eri_type( mythread, -1 );
if (ebuf_max_nzeri<=0) return -1;
/* Loop over shell-quartet types (La Lb | Lc Ld) in canonical order
 * (La>=Lb, (La,Lb)>=(Lc,Ld)); Labcd enumerates the quartet types. */
for ( La=0; La<=maxlqn; La++ ) {
    for ( Lb=0; Lb<=La; Lb++ ) {
        Lab = La*(La+1)/2 + Lb;
        for ( Lc=0; Lc<=La; Lc++ ) {
            for ( Ld=0; Ld<=(Lc==La? Lb : Lc ); Ld++ ) {
                Lcd = Lc*(Lc+1)/2 + Ld;
                Labcd = Lab*(Lab+1)/2 + Lcd;
#ifdef USE_CUDA
                /* Quartet types assigned to the GPU are skipped here;
                 * still advance the load-balancing offset. */
                if (cuda_use_Device() && cuda_get_optCPU(Labcd)!=0) {
                    offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
                    continue;
                }
#endif
                /* Rotate the worker id with the accumulated CS-pair count
                 * so the work is spread evenly over the workers. */
                local_id = (int)((offset+(size_t)workerid)%(size_t)nworkers);
                ret_code = calc_twoint_buffer[Labcd](
                        &nworkers, &local_id,
                        //&nworkers, &workerid,
                        &La, &Lb, &Lc, &Ld,
                        shel_atm, shel_ini, atom_x, atom_y, atom_z,
                        leading_cs_pair, csp_schwarz, csp_ics, csp_jcs,
                        csp_leading_ps_pair, psp_zeta, psp_dkps, psp_xiza,
                        &ebuf_max_nzeri, &ebuf_non_zero_eri,
                        ebuf_eri, ebuf_ind4, &last_ijcs, &last_klcs );
                if ( ret_code == OFMO_EBUF_FULL ) {
                    /* Buffer exhausted: remember exactly where we stopped
                     * so the G-matrix pass can resume from this point. */
                    ofmo_twoint_set_last_eri_type( mythread, Labcd );
                    ofmo_twoint_set_last_ijcs( mythread, last_ijcs );
                    ofmo_twoint_set_last_klcs( mythread, last_klcs );
                    ofmo_twoint_set_stored_nzeri( mythread,
                            (size_t)ebuf_non_zero_eri );
                    //ofmo_integ_set_loop_offset( mythread, offset );
                    //return 0;
                    return Labcd;
                }
                // added for load-balancing
                offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
            }// Ld
        } // Lc
    } // Lb
} // La
/* All integrals fitted in the buffer (in-core case): mark completion with
 * a sentinel eri-type value larger than any real Labcd. */
ofmo_twoint_set_last_eri_type( mythread, 100000 );
ofmo_twoint_set_last_ijcs( mythread, last_ijcs );
ofmo_twoint_set_last_klcs( mythread, last_klcs );
ofmo_twoint_set_stored_nzeri( mythread, (size_t)ebuf_non_zero_eri );
// added for load-balancing
//ofmo_integ_set_loop_offset( mythread, offset );
//return 0;
return 100000;
}

/* Accumulate the two-electron (G-matrix) contributions of nstored_eri
 * buffered integrals: for each (ij|kl) add Coulomb terms to G[ij], G[kl]
 * and subtract scaled exchange terms from G[ik], G[il], G[jk], G[jl].
 * D and G are treated as square (nao x nao) matrices here. */
int ofmo_integ_add_fock( const int nao, const size_t nstored_eri,
        const double eri_val[], const short int
eri_ind4[], const double D[], double G[] ) {
    int i, j, k, l, ij, kl, ik, il, jk, jl, i0, j0;
    size_t ix, ix4;
    double x, x4;
    /*// debug
    { int mythread; mythread = omp_get_thread_num();
    printf("thrd= %d, nzeri= %lld\n", mythread, (long long)nstored_eri );
    fflush( stdout ); }*/
    for ( ix=0, ix4=0; ix<nstored_eri; ix++, ix4+=4 ) {
        x = eri_val[ix];
        /* Unpack the four AO indices (stored as short ints, 4 per integral) */
        i = (int)eri_ind4[ix4+0];
        j = (int)eri_ind4[ix4+1];
        k = (int)eri_ind4[ix4+2];
        l = (int)eri_ind4[ix4+3];
        x4 = x * 4.e0;      /* Coulomb weight */
        x = x * x_coef;     //DFT (exchange scaled by HF-exchange coefficient)
        i0 = i*nao;
        j0 = j*nao;
        ij = i0 + j;
        ik = i0 + k;
        il = i0 + l;
        jk = j0 + k;
        jl = j0 + l;
        kl = k*nao + l;
        G[ij] += D[kl]*x4;
        G[kl] += D[ij]*x4;
        G[ik] -= D[jl]*x;
        G[il] -= D[jk]*x;
        G[jk] -= D[il]*x;
        G[jl] -= D[ik]*x;
    }
    return 0;
}

/*
 * Compute the Fock-matrix contribution from the two-electron integrals
 * kept in this thread's buffer.
 * Note: the result only becomes exact once the square matrix is folded
 * back into the packed (compressed) format.
 * The density matrix D and the Fock matrix G are treated as square
 * matrices.  Must be called from within the thread-parallel region.
 * G is expected to be already initialized.
 * The following variables are expected to differ per thread:
 *   non_zero_eri
 *   ebuf_eri[]
 *   ebuf_ind4[]
 *   G[]
 */
static int ofmo_twoint_fock_incore_partial( const int mythread,
        const int nao, const double D[], double G[] ) {
    short int *ebuf_ind4;
    double *ebuf_eri;
    size_t non_zero_eri;
    ebuf_eri = ofmo_twoint_get_ebuf_eri( mythread );
    ebuf_ind4 = ofmo_twoint_get_ebuf_ind4( mythread );
    non_zero_eri = ofmo_twoint_get_stored_nzeri( mythread );
    ofmo_integ_add_fock( nao, non_zero_eri, ebuf_eri, ebuf_ind4, D, G );
    return 0;
}

/* Expand a packed ("U"-format) matrix SRC of order n into the full
 * square matrix DST (symmetric fill of both triangles). */
static void unpack_matrix( const int n, const double SRC[], double DST[] ) {
    int i, j, ij;
    ij = 0;
    for ( i=0; i<n; i++ ) {
        for ( j=0; j<=i; j++ ) {
            DST[i*n+j] = DST[j*n+i] = SRC[ij];
            ij++;
        }
    }
}

/** Electron-repulsion (two-electron) integral driver (2)
 * @ingroup integ-top
 *
 * Computes the two-electron Hamiltonian matrix (G matrix) from the
 * two-electron integrals calculated and stored by a previous call to
 * \c ofmo_integ_twoint.  Integrals that were not stored in the buffer
 * are recomputed on the fly and added to the G matrix.
 *
 * @attention
 * @li This function is thread-parallelized with OpenMP; to run it in
 *     parallel it must be called from inside a thread-parallel region.
 * @li With suitable \c nworkers and worker ID \c workerid it supports
 *     hybrid OpenMP/MPI execution.  Under MPI, a reduction (e.g. with
 *     \c MPI_Allreduce) is required after this function returns in
 *     order to obtain the complete G matrix.
 * @li \c ofmo_integ_twoint_first must have been called beforehand.
 *
そうでない場合には、結果がおかしくなる場合がある。 * @li 得られるG行列は、軌道量子数の大きさで * ソートされたものである。元の並びのG行列が欲しい場合には、 * 要素の並べ替えが必要である。 * * @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数 * @param[in] workerid 各ワーカプロセス(スレッド)のID。 * \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。 * @param[in] maxlqn 最大軌道量子数 * @param[in] shel_atm[ics] CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_ini[ics] CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x[iat] 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y[iat] 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z[iat] 原子の番号 \c iat のz座標(au単位) * @param[in] leading_cs_pair[itype] CSペアタイプ番号 \c itype の * 先頭CSペア番号 * @param[in] csp_schwarz[icsp] CSペア番号 \c icsp のSchwarz積分 * @param[in] csp_ics[icsp] CSペア番号 \c icsp の1つ目のCS番号 * @param[in] csp_jcs[icsp] CSペア番号 \c icsp の2つめのCS番号。ただし、 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。 * @param[in] csp_leading_ps_pair[icsp] CSペア番号 \c icsp に含まれる * PSペアの先頭PSペア番号 * @param[in] psp_zeta[ipsp] PSペア番号 \c ipsp の軌道指数和 * \f$ \zeta = \zeta_a + \zeta_b \f$ * @param[in] psp_dkps[ipsp] PSペア番号 \c ipsp の線型結合定数 * \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b} * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b} * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2 * \right]\f] * @param[in] psp_xiza[ipsp] PSペア番号 \c ipsp の * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$ * @param[in] nao AO数 * @param[in] D[] 密度行列(圧縮"U"形式) * * @param[out] G[] 二電子ハミルトン行列(G行列、圧縮"U"形式) * * @retval 0 正常終了(すべての積分が保存されても、バッファサイズの不足で * 保存されていない積分があっても、正常終了である) * @retval -1 異常終了(2011/0613現在では考えていない) * * */ int ofmo_integ_gen_gmat( // parallelization const int nworkers, const int workerid, // basis set & cutoff table data const int maxlqn, const int shel_atm[], const int shel_ini[], const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const int leading_cs[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double 
psp_xiza[],
    // density matrix data & G-matrix (output)
    const int nao, const double D[], double G[] ) {
    // DFT flag
    //const double cHFx ) {
    int nao2;
    double *D_SQ=NULL;
    int mythread = 0;
    int La, Lb, Lc, Ld, Lab, Lcd, Labcd;
    int last_eri_type, last_ijcs, last_klcs;
    int nnao;
    double *Gtmp;
    long nzeri, max_nzeri;
    short *etmp_ind4;
    double *etmp_val;
    // added
    int local_id;
    size_t offset;
    //DFT_Bgn
    //x_coef = cHFx;
    x_coef = 1.0;   /* full HF exchange (DFT scaling disabled) */
    //DFT_End
    // debug
    char *CS = "spdfg";
    int g_last_eri_type = ofmo_twoint_get_global_last_eri_type();
    /* Allocate (serialized across threads) the square copy of the packed
     * density matrix; one thread unpacks D and zero-clears the shared G. */
#pragma omp critical
    D_SQ = ofmo_twoint_alloc_square_density( nao );
#pragma omp single
    {
        unpack_matrix( nao, D, D_SQ );
        nao2 = nao*(nao+1)/2;
        memset( G, '\0', sizeof(double)*nao2 );
    }
    float *Dcs;
    Dcs = ofmo_twoint_gen_Dcs(maxlqn, nao, leading_cs, D);
#ifdef USE_CUDA
#pragma omp master
    {
        int ret = 0;
        int ncs = leading_cs[maxlqn+1];
        ret = cuda_genGmat_Init(ncs, nao, Dcs, D_SQ, x_coef);
        if (ret<0) exit(1);
    }
#endif
    mythread = omp_get_thread_num();
#pragma omp barrier
    /* Per-thread square work matrix accumulating this thread's G part */
    Gtmp = ofmo_twoint_alloc_local_gmat( mythread, nao );
    last_eri_type = ofmo_twoint_get_last_eri_type( mythread );
    nnao = nao*nao;
    memset( Gtmp, '\0', sizeof(double)*nnao );
    //
    etmp_ind4 = ofmo_twoint_getadd_integ_ind4( mythread );
    etmp_val = ofmo_twoint_getadd_integ_val( mythread );
    max_nzeri = (long)ofmo_twoint_get_max_stored_integ( mythread );
    ofmo_twoint_set_stored_integ( mythread, 0 );
    // added for load-balancing
    //offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;
#ifndef USE_CUDA
    nzeri = 0;
    /* CPU path: recompute only the quartet types that did not fit in the
     * buffer, resuming at last_eri_type where the first pass stopped. */
    for ( La=0; La<=maxlqn; La++ ) {
        for ( Lb=0; Lb<=La; Lb++ ) {
            Lab = La*(La+1)/2 + Lb;
            for ( Lc=0; Lc<=La; Lc++ ) {
                for ( Ld=0; Ld<=(Lc==La? Lb : Lc ); Ld++ ) {
                    Lcd = Lc*(Lc+1)/2 + Ld;
                    Labcd = Lab*(Lab+1)/2 + Lcd;
                    //if ( Labcd < last_eri_type ) continue;
                    /* Types fully stored in the buffer are added later by
                     * ofmo_twoint_fock_incore_partial; skip them here but
                     * keep the load-balancing offset consistent. */
                    if ( Labcd < last_eri_type ) {
                        offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
                        continue;
                    }
                    if ( Labcd == last_eri_type ) {
                        /* Resume inside the type where the buffer filled up */
                        last_ijcs = ofmo_twoint_get_last_ijcs( mythread );
                        last_klcs = ofmo_twoint_get_last_klcs( mythread );
                        /*// debug
                        printf("#D thd=%d, (%c%c|%c%c) ijcs=%d, klcs=%d\n",
                                mythread, CS[La], CS[Lb], CS[Lc], CS[Ld],
                                last_ijcs, last_klcs );
                        fflush(stdout);*/
                    } else {
                        last_ijcs = last_klcs = -1;
                    }
                    local_id = (int)((offset+(size_t)workerid)%(size_t)nworkers);
                    start_w2e();
                    calc_twoint_direct[Labcd](
                            &nworkers, &local_id,
                            //&nworkers, &workerid,
                            &La, &Lb, &Lc, &Ld,
                            shel_atm, shel_ini, atom_x, atom_y, atom_z,
                            leading_cs_pair, csp_schwarz, csp_ics, csp_jcs,
                            csp_leading_ps_pair, psp_zeta, psp_dkps, psp_xiza,
                            &max_nzeri, &nzeri, etmp_val, etmp_ind4,
                            &last_ijcs, &last_klcs,
                            &nao, D_SQ, Gtmp );
                    set_w2e(Labcd);
                    // added for load-balancing
                    offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
                }// Ld
            } // Lc
        } // Lb
    } // La
    /* Flush integrals still pending in the temporary list, then add the
     * contribution of the integrals buffered by the first pass. */
    if ( nzeri > 0 )
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, D_SQ, Gtmp );
    start_w2e();
    ofmo_twoint_fock_incore_partial( mythread, nao, D_SQ, Gtmp );
    set_w2e(-1);
#else /* USE_CUDA */
    nzeri = 0;
    // idev=1 for GPU, 0 for CPU
    for (int idev=1; idev>=0; idev--) {
        offset = 0;
        for ( La=0; La<=maxlqn; La++ ) {
            for ( Lb=0; Lb<=La; Lb++ ) {
                Lab = La*(La+1)/2 + Lb;
                for ( Lc=0; Lc<=La; Lc++ ) {
                    for ( Ld=0; Ld<=(Lc==La? Lb : Lc ); Ld++ ) {
                        Lcd = Lc*(Lc+1)/2 + Ld;
                        Labcd = Lab*(Lab+1)/2 + Lcd;
                        /* Decide whether this quartet type is handled on GPU */
                        int gpu = (cuda_use_Device() && cuda_get_optCPU(Labcd)!=0);
                        //if ( Labcd < last_eri_type ) continue;
                        if (!gpu && Labcd < last_eri_type ) {
                            offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
                            continue;
                        }
                        if ( Labcd == last_eri_type ) {
                            last_ijcs = ofmo_twoint_get_last_ijcs( mythread );
                            last_klcs = ofmo_twoint_get_last_klcs( mythread );
                        } else {
                            last_ijcs = last_klcs = -1;
                        }
                        local_id = (int)((offset+(size_t)workerid)%(size_t)nworkers);
                        start_w2e();
                        //if (idev==0 && Labcd<=g_last_eri_type) {
                        if (idev==0 && !gpu) {
                            calc_twoint_direct[Labcd](
                                    &nworkers, &local_id,
                                    //&nworkers, &workerid,
                                    &La, &Lb, &Lc, &Ld,
                                    shel_atm, shel_ini, atom_x, atom_y, atom_z,
                                    leading_cs_pair, csp_schwarz,
                                    csp_ics, csp_jcs,
                                    csp_leading_ps_pair,
                                    psp_zeta, psp_dkps, psp_xiza,
                                    &max_nzeri, &nzeri, etmp_val, etmp_ind4,
                                    &last_ijcs, &last_klcs,
                                    &nao, D_SQ, Gtmp );
                        //} else if (idev>0 && Labcd>g_last_eri_type) {
                        } else if (idev>0 && gpu) {
                            cuda_calc_twoint_direct(Labcd,
                                    nworkers, local_id,
                                    //nworkers, workerid,
                                    La, Lb, Lc, Ld,
                                    shel_atm, shel_ini, atom_x, atom_y, atom_z,
                                    leading_cs_pair, csp_schwarz,
                                    csp_ics, csp_jcs,
                                    csp_leading_ps_pair,
                                    psp_zeta, psp_dkps, psp_xiza,
                                    max_nzeri, &nzeri, etmp_val, etmp_ind4,
                                    last_ijcs, last_klcs,
                                    nao, D_SQ, Gtmp );
                        }
                        set_w2e(Labcd);
                        // added for load-balancing
                        offset+=(leading_cs_pair[Lab+1]-leading_cs_pair[Lab]);
                    }// Ld
                } // Lc
            } // Lb
        } // La
    } // idev
    if ( nzeri > 0 )
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, D_SQ, Gtmp );
    start_w2e();
    ofmo_twoint_fock_incore_partial( mythread, nao, D_SQ, Gtmp );
    set_w2e(-1);
#pragma omp master
    {
        int ret;
        ret = cuda_genGmat_Add(nao, Gtmp);
        if (ret<0) exit(1);
    }
#endif /* USE_CUDA */
    // Fold each finished thread's square Gtmp into the shared packed G,
    // one thread at a time (off-diagonal elements combine (i,j) and (j,i)).
#pragma omp critical
    {
        int i, j, ij;
        ij = 0;
        for ( i=0; i<nao; i++ ) {
            for ( j=0; j<i; j++ ) {
                G[ij] += Gtmp[i*nao+j] + Gtmp[j*nao+i];
                ij++;
            }
            G[ij] += Gtmp[i*nao+i];
            ij++;
        }
    }
    return 0;
}

//static int (*calc_twoint_direct[])(
static int
(*calc_ifc4c[])( // parallelization const int *pnworkers, const int *pworkerid, // integral type data const int *pLa, const int *pLb, const int *pLc, const int *pLd, // basis and cutoff table data for fragment const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // density matrix of monomer const double D_mon[], // (output) Coulomb potential double V_frg[] ) = { // original /*ofmo_ifc4c_ssss__, ofmo_ifc4c_ssps__, ofmo_ifc4c_sspp__, ofmo_ifc4c_ssds__, ofmo_ifc4c_ssdp__, ofmo_ifc4c_ssdd__, ofmo_ifc4c_psss__, ofmo_ifc4c_psps__, ofmo_ifc4c_pspp__, ofmo_ifc4c_psds__, ofmo_ifc4c_psdp__, ofmo_ifc4c_psdd__, ofmo_ifc4c_ppss__, ofmo_ifc4c_ppps__, ofmo_ifc4c_pppp__, ofmo_ifc4c_ppds__, ofmo_ifc4c_ppdp__, ofmo_ifc4c_ppdd__, ofmo_ifc4c_dsss__, ofmo_ifc4c_dsps__, ofmo_ifc4c_dspp__, ofmo_ifc4c_dsds__, ofmo_ifc4c_dsdp__, ofmo_ifc4c_dsdd__, ofmo_ifc4c_dpss__, ofmo_ifc4c_dpps__, ofmo_ifc4c_dppp__, ofmo_ifc4c_dpds__, ofmo_ifc4c_dpdp__, ofmo_ifc4c_dpdd__, ofmo_ifc4c_ddss__, ofmo_ifc4c_ddps__, ofmo_ifc4c_ddpp__, ofmo_ifc4c_ddds__, ofmo_ifc4c_dddp__, ofmo_ifc4c_dddd__,*/ // OS ofmo_ifc4c_os_ssss, ofmo_ifc4c_os_ssps, ofmo_ifc4c_os_sspp, ofmo_ifc4c_os_ssds, ofmo_ifc4c_os_ssdp, ofmo_ifc4c_os_ssdd, ofmo_ifc4c_os_psss, ofmo_ifc4c_os_psps, ofmo_ifc4c_os_pspp, 
ofmo_ifc4c_os_psds, ofmo_ifc4c_os_psdp, ofmo_ifc4c_os_psdd, ofmo_ifc4c_os_ppss, ofmo_ifc4c_os_ppps, ofmo_ifc4c_os_pppp, ofmo_ifc4c_os_ppds, ofmo_ifc4c_os_ppdp, ofmo_ifc4c_os_ppdd, ofmo_ifc4c_os_dsss, ofmo_ifc4c_os_dsps, ofmo_ifc4c_os_dspp, ofmo_ifc4c_os_dsds, ofmo_ifc4c_os_dsdp, ofmo_ifc4c_os_dsdd, ofmo_ifc4c_os_dpss, ofmo_ifc4c_os_dpps, ofmo_ifc4c_os_dppp, ofmo_ifc4c_os_dpds, ofmo_ifc4c_os_dpdp, ofmo_ifc4c_os_dpdd, ofmo_ifc4c_os_ddss, ofmo_ifc4c_os_ddps, ofmo_ifc4c_os_ddpp, ofmo_ifc4c_os_ddds, ofmo_ifc4c_os_dddp, ofmo_ifc4c_os_dddd, // Rys /*ofmo_ifc4c_rys_ssss, ofmo_ifc4c_rys_ssps, ofmo_ifc4c_rys_sspp, ofmo_ifc4c_rys_ssds, ofmo_ifc4c_rys_ssdp, ofmo_ifc4c_rys_ssdd, ofmo_ifc4c_rys_psss, ofmo_ifc4c_rys_psps, ofmo_ifc4c_rys_pspp, ofmo_ifc4c_rys_psds, ofmo_ifc4c_rys_psdp, ofmo_ifc4c_rys_psdd, ofmo_ifc4c_rys_ppss, ofmo_ifc4c_rys_ppps, ofmo_ifc4c_rys_pppp, ofmo_ifc4c_rys_ppds, ofmo_ifc4c_rys_ppdp, ofmo_ifc4c_rys_ppdd, ofmo_ifc4c_rys_dsss, ofmo_ifc4c_rys_dsps, ofmo_ifc4c_rys_dspp, ofmo_ifc4c_rys_dsds, ofmo_ifc4c_rys_dsdp, ofmo_ifc4c_rys_dsdd, ofmo_ifc4c_rys_dpss, ofmo_ifc4c_rys_dpps, ofmo_ifc4c_rys_dppp, ofmo_ifc4c_rys_dpds, ofmo_ifc4c_rys_dpdp, ofmo_ifc4c_rys_dpdd, ofmo_ifc4c_rys_ddss, ofmo_ifc4c_rys_ddps, ofmo_ifc4c_rys_ddpp, ofmo_ifc4c_rys_ddds, ofmo_ifc4c_rys_dddp, ofmo_ifc4c_rys_dddd,*/ }; /** 4中心クーロン相互作用項の計算を行う関数 * @ingroup integ-top * * FMO計算に現れる、2つのモノマー間の4中心クーロン相互作用項を * 計算する。 * 4中心クーロン積分を計算して、積分と与えられた密度行列を元に、 * 4中心クーロン相互作用項を求める。 * * @attention * @li OpenMPを用いたスレッド並列化を行っている。スレッド並列時には、 * スレッド並列領域内でこの関数を呼び出す必要がある。 * @li \c nworkers とワーカID \c workerid を適切に設定すれば、OpenMPと * MPIとのハイブリッド並列実行が可能である。MPI並列時に、完全な * クーロン項を得るためには、この関数の終了後に * \c MPI_Allreduce 関数などを用いたリダクション処理を行う必要がある。 * @li 得られるクーロン項 \c V_frg[] は、軌道量子数の大きさで * ソートされたものである。元の並びのクーロン項が欲しい場合には、 * 要素の並べ替えが必要である。 * * @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数 * @param[in] workerid ワーカID。 * \f$ 0\le \tt{workerid} < \tt{nworkers} \f$ * * @param[in] maxlqn 最大軌道量子数 * @param[in] shel_atm_frg[ics] 対象フラグメントの、 * CS番号 \c ics 
のCSが属する原子の番号 * @param[in] shel_ini_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のz座標(au単位) * @param[in] leading_cs_pair_frg[itype] 対象フラグメントの、 * CSペアタイプ番号 \c itype の先頭CSペア番号 * @param[in] csp_schwarz_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp のSchwarz積分 * @param[in] csp_ics_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp の1つ目のCS番号 * @param[in] csp_jcs_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp の2つめのCS番号。ただし、 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。 * @param[in] csp_leading_ps_pair_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp に含まれるPSペアの先頭PSペア番号 * @param[in] psp_zeta_frg[ipsp] 対象フラグメントの、 * PSペア番号 \c ipsp の軌道指数和 * \f$ \zeta = \zeta_a + \zeta_b \f$ * @param[in] psp_dkps_frg[ipsp] 対象フラグメントの、 * PSペア番号 \c ipsp の線型結合定数 * \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b} * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b} * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2 * \right]\f] * @param[in] psp_xiza_frg[ipsp] 対象フラグメントの、 * PSペア番号 \c ipsp の、 * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$ * * @param[in] shel_atm_mon[ics] 相手モノマーの、 * CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_ini_mon[ics] 相手モノマーの、 * CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のz座標(au単位) * @param[in] leading_cs_pair_mon[itype] 相手モノマーの、 * CSペアタイプ番号 \c itype の先頭CSペア番号 * @param[in] csp_schwarz_mon[icsp] 相手モノマーの、 * CSペア番号 \c icsp のSchwarz積分 * @param[in] csp_ics_mon[icsp] 相手モノマーの、 * CSペア番号 \c icsp の1つ目のCS番号 * @param[in] csp_jcs_mon[icsp] 相手モノマーの、 * CSペア番号 \c icsp の2つめのCS番号。ただし、 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。 * @param[in] csp_leading_ps_pair_mon[icsp] ターゲットフラグメントの、 * CSペア番号 \c icsp に含まれるPSペアの先頭PSペア番号 * 
@param[in] psp_zeta_mon[ipsp] 相手モノマーの、 * PSペア番号 \c ipsp の軌道指数和 * \f$ \zeta = \zeta_a + \zeta_b \f$ * @param[in] psp_dkps_mon[ipsp] 相手モノマーの、 * PSペア番号 \c ipsp の線型結合定数 * \f[ K_{ab} = \sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b} * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b} * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2 * \right]\f] * @param[in] psp_xiza_mon[ipsp] 相手モノマーの、 * PSペア番号 \c ipsp の * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$ * * @param[in] nao_mon 相手モノマーのAO数 * @param[in] D_mon[] 相手モノマーの密度行列(圧縮"U"形式) * * @param[out] V_frg[] 対象フラグメントにおける * 相手モノマーとの間の4中心クーロン相互作用項 * (G行列、圧縮"U"形式)。この配列は、スレッドごとに別領域 * を与える必要がある。 * * @retval 0 正常終了(すべての積分が保存されても、バッファサイズの不足で * 保存されていない積分があっても、正常終了である) * @retval -1 異常終了(2011/06/14現在では考えていない) * * */ int ofmo_integ_ifc4c_sorted_partial( // parallelization const int nworkers, const int workerid, // basis and cutoff table data for fragment const int maxlqn, const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis and cutoff table data for monomer const int shel_atm_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const int leading_cs_pair_mon[], const double csp_schwarz_mon[], const int csp_ics_mon[], const int csp_jcs_mon[], const int csp_leading_ps_pair_mon[], const double psp_zeta_mon[], const double psp_dkps_mon[], const double psp_xiza_mon[], // const int leading_cs_mon[], // density matrix of monomer const int nao_mon, const double D_mon[], // (output) Coulomb potential double V_frg[] ) { int La, Lb, Lc, Ld, Lab, Lcd, Labcd; // added for load-balancing int offset=0, local_id=workerid, mythread; // int type, pos; int 
ma[] = {0, 1, 1, 2, 2, 2,};   /* not referenced in this function */
    int mb[] = {0, 0, 1, 0, 1, 2,};   /* not referenced in this function */
    // added for load-balancing
    mythread = omp_get_thread_num();
    //type = ofmo_integ_get_target_type( mythread ); type = -1; // no dynamic load balancing
#pragma omp master
    TLOG_LOG_IN(5);
    /* Shell-pair-level density screening data of the partner monomer */
    float *Dcs;
    Dcs = ofmo_twoint_gen_Dcs(maxlqn, nao_mon, leading_cs_mon, D_mon);
#ifdef USE_CUDA
#pragma omp master
    {
        int ret = 0;
        int ncs_mon = leading_cs_mon[maxlqn+1];
        ret = cuda_ifc4c_SetDcs(ncs_mon, Dcs);
        if (ret<0) exit(1);
        ret = cuda_ifc4c_calc_Init();
        if (ret<0) exit(1);
    }
#endif
#ifndef USE_CUDA
    offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;   /* NOTE: persisted loop offset is immediately overridden */
    local_id = (offset+workerid)%nworkers;
    /* Loop over quartet types (ab|cd): (a,b) runs over fragment CS-pair
     * types, (c,d) over monomer CS-pair types. */
    for ( La=0; La<=maxlqn; La++ ) {
        for ( Lb=0; Lb<=La; Lb++ ) {
            Lab = La*(La+1)/2 + Lb;
            for ( Lc=0; Lc<=maxlqn; Lc++ ) {
                for ( Ld=0; Ld<=Lc; Ld++ ) {
                    Lcd = Lc*(Lc+1)/2 + Ld;
                    //Labcd = Lab*maxlqn2 + Lcd;
                    Labcd = Lab*6 + Lcd;   /* 6 (c,d) pair types (s,p,d shells) */
                    calc_ifc4c[Labcd](
                            &nworkers, &local_id,
                            &La, &Lb, &Lc, &Ld,
                            shel_atm_frg, shel_ini_frg,
                            atom_x_frg, atom_y_frg, atom_z_frg,
                            leading_cs_pair_frg,
                            csp_schwarz_frg, csp_ics_frg, csp_jcs_frg,
                            csp_leading_ps_pair_frg,
                            psp_zeta_frg, psp_dkps_frg, psp_xiza_frg,
                            shel_atm_mon, shel_ini_mon,
                            atom_x_mon, atom_y_mon, atom_z_mon,
                            leading_cs_pair_mon,
                            csp_schwarz_mon, csp_ics_mon, csp_jcs_mon,
                            csp_leading_ps_pair_mon,
                            psp_zeta_mon, psp_dkps_mon, psp_xiza_mon,
                            D_mon, V_frg );
                    // added for load-balancing
                    offset += (leading_cs_pair_frg[Lab+1]-leading_cs_pair_frg[Lab]);
                    local_id = (offset+workerid)%nworkers;
                }
            }
        }
    }
#else /* USE_CUDA */
    offset = ofmo_integ_get_loop_offset( mythread );
    offset = 0;
    local_id = (offset+workerid)%nworkers;
    // idev=1 for GPU, 0 for CPU
    for (int idev=1; idev>=0; idev--) {
        for ( La=0; La<=maxlqn; La++ ) {
            for ( Lb=0; Lb<=La; Lb++ ) {
                Lab = La*(La+1)/2 + Lb;
                for ( Lc=0; Lc<=maxlqn; Lc++ ) {
                    for ( Ld=0; Ld<=Lc; Ld++ ) {
                        Lcd = Lc*(Lc+1)/2 + Ld;
                        //Labcd = Lab*maxlqn2 + Lcd;
                        Labcd = Lab*6 + Lcd;
                        cuda_ifc4c_calc(idev,
                                &nworkers, &local_id,
                                &La, &Lb, &Lc, &Ld,
                                shel_atm_frg, shel_ini_frg,
                                atom_x_frg, atom_y_frg, atom_z_frg,
                                leading_cs_pair_frg,
                                csp_schwarz_frg, csp_ics_frg, csp_jcs_frg,
                                csp_leading_ps_pair_frg,
                                psp_zeta_frg, psp_dkps_frg, psp_xiza_frg,
                                shel_atm_mon, shel_ini_mon,
                                atom_x_mon, atom_y_mon, atom_z_mon,
                                leading_cs_pair_mon,
                                csp_schwarz_mon, csp_ics_mon, csp_jcs_mon,
                                csp_leading_ps_pair_mon,
                                psp_zeta_mon, psp_dkps_mon, psp_xiza_mon,
                                D_mon, V_frg );
                        // added for load-balancing
                        offset += (leading_cs_pair_frg[Lab+1]-leading_cs_pair_frg[Lab]);
                        local_id = (offset+workerid)%nworkers;
                    }
                }
            }
        }
    }
#endif /* USE_CUDA */
    // added for load-balancing
    ofmo_integ_set_loop_offset( mythread, offset );
#pragma omp master
    TLOG_LOG_OUT(5);
    return 0;
}

/* Dispatch table of 3-center Coulomb integral kernels, indexed by
 * Labc = 3*Lab + Lc (fragment CS-pair type x monomer shell type). */
static int (*calc_ifc3c[]) (
        // parallelization
        const int *pnworkers, const int *pworkerid,
        // integral type data
        const int *pLa, const int *pLb, const int *pLc,
        // basis and cutoff table data for fragment
        const int shel_atm_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[],
        const int leading_cs_pair_frg[],
        //const double csp_schwarz_frg[],
        const int csp_ics_frg[], const int csp_jcs_frg[],
        const int csp_leading_ps_pair_frg[],
        const double psp_zeta_frg[], const double psp_dkps_frg[],
        const double psp_xiza_frg[],
        // basis set data for monomer
        const int leading_cs_mon[],
        const int shel_tem_mon[], const int shel_atm_mon[],
        const int shel_add_mon[], const int shel_ini_mon[],
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const double prim_exp_mon[], const double prim_coe_mon[],
        // monomer AO population
        const double ao_pop_mon[],
        // (output) Coulomb potential
        double V_frg[] ) = {
    // OS
    ofmo_ifc3c_os_ssss, ofmo_ifc3c_os_sspp, ofmo_ifc3c_os_ssdd,
    ofmo_ifc3c_os_psss, ofmo_ifc3c_os_pspp, ofmo_ifc3c_os_psdd,
    ofmo_ifc3c_os_ppss, ofmo_ifc3c_os_pppp, ofmo_ifc3c_os_ppdd,
    ofmo_ifc3c_os_dsss, ofmo_ifc3c_os_dspp, ofmo_ifc3c_os_dsdd,
    ofmo_ifc3c_os_dpss, ofmo_ifc3c_os_dppp, ofmo_ifc3c_os_dpdd,
    ofmo_ifc3c_os_ddss, ofmo_ifc3c_os_ddpp, ofmo_ifc3c_os_dddd,
    // Rys
/*ofmo_ifc3c_rys_ssss, ofmo_ifc3c_rys_sspp, ofmo_ifc3c_rys_ssdd, ofmo_ifc3c_rys_psss, ofmo_ifc3c_rys_pspp, ofmo_ifc3c_rys_psdd, ofmo_ifc3c_rys_ppss, ofmo_ifc3c_rys_pppp, ofmo_ifc3c_rys_ppdd, ofmo_ifc3c_rys_dsss, ofmo_ifc3c_rys_dspp, ofmo_ifc3c_rys_dsdd, ofmo_ifc3c_rys_dpss, ofmo_ifc3c_rys_dppp, ofmo_ifc3c_rys_dpdd, ofmo_ifc3c_rys_ddss, ofmo_ifc3c_rys_ddpp, ofmo_ifc3c_rys_dddd,*/ }; /** 3中心クーロン相互作用項の計算を行う関数 * @ingroup integ-top * * FMO計算に現れる、2つのモノマー間の3中心クーロン相互作用項を * 計算する。 * 3中心クーロン積分を計算して、積分と与えられた密度行列を元に、 * 3中心クーロン相互作用項を求める。 * * @attention * @li この関数はOpenMPを用いたスレッド並列化が行われている。スレッド並列 * 実行のためには、スレッド並列領域内からこの関数を呼び出す * 必要がある。 * @li \c nworkers と \c workerid を適切に設定すると、OpenMPとMPIの * ハイブリッド並列実行が可能である。MPI並列を利用する際には、 * 関数終了後に、\c MPI_Allreduce 関数などを用いたリダクション処理 * を行うことで、完全なクーロン項が得られる。 * @li 得られるクーロン項 \c V_frg[] は、軌道量子数の大きさで * ソートされたものである。元の並びのクーロン項が欲しい場合には、 * 要素の並べ替えが必要である。 * * @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数 * @param[in] workerid ワーカID。 * \f$ 0\le \tt{workerid} < \tt{nworkers} \f$ * * @param[in] maxlqn 最大軌道量子数 * @param[in] shel_atm_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_ini_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のz座標(au単位) * @param[in] leading_cs_pair_frg[itype] 対象フラグメントの、 * CSペアタイプ番号 \c itype の先頭CSペア番号 * @param[in] csp_schwarz_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp のSchwarz積分 * @param[in] csp_ics_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp の1つ目のCS番号 * @param[in] csp_jcs_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp の2つめのCS番号。ただし、 * \f$ \tt{csp\_ics[icsp]} \ge \tt{csp\_jcs[icsp]} \f$ である。 * @param[in] csp_leading_ps_pair_frg[icsp] 対象フラグメントの、 * CSペア番号 \c icsp に含まれるPSペアの先頭PSペア番号 * @param[in] psp_zeta_frg[ipsp] 対象フラグメントの、 * PSペア番号 \c ipsp の軌道指数和 * \f$ \zeta = \zeta_a + \zeta_b \f$ * @param[in] psp_dkps_frg[ipsp] 対象フラグメントの、 * PSペア番号 \c ipsp の線型結合定数 * \f[ K_{ab} = 
\sqrt2 \pi^{5/4} \frac1{\zeta_a+\zeta_b} * \exp\left[ -\frac{\zeta_a \zeta_b}{\zeta_a + \zeta_b} * ( \boldmath A \unboldmath - \boldmath B \unboldmath )^2 * \right]\f] * @param[in] psp_xiza_frg[ipsp] 対象フラグメントの、 * PSペア番号 \c ipsp の * \f$ \frac{\xi}{\zeta_a} = \frac{\zeta_b}{\zeta_a+\zeta_b} \f$ * * @param[in] leading_cs_mon[lqn] 相手モノマーの、 * 軌道量子数 \c lqn の先頭CS番号 * @param[in] shel_tem_mon[ics] 相手モノマーの、CS番号 \c ics のCSの縮約長 * @param[in] shel_atm_mon[ics] 相手モノマーの、 * CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_add_mon[ics] 相手モノマーの、CS番号 \c ics のCSに属する * PSの先頭PS番号 * @param[in] shel_ini_mon[ics] 相手モノマーの、CS番号 \c ics のCSに * 含まれるAOの先頭AO番号 * @param[in] atom_x_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のz座標(au単位) * @param[in] prim_exp_mon[ips] 相手モノマーの、PS番号 \c ips のPSの * 軌道指数 * @param[in] prim_coe_mon[ips] 相手モノマーの、PS番号 \c ips のPSの * 規格化定数込みの縮約係数 * * @param[in] ao_pop_mon[] 相手モノマーの、AO population * * @param[out] V_frg[] 対象フラグメントにおける * 相手モノマーとの間の3中心クーロン相互作用項 * (G行列、圧縮"U"形式)。この配列は、スレッドごとに別領域 * を与える必要がある。 * * @retval 0 正常終了(すべての積分が保存されても、バッファサイズの不足で * 保存されていない積分があっても、正常終了である) * @retval -1 異常終了(2011/06/14現在では考えていない) * * */ int ofmo_integ_ifc3c_sorted_partial( // parallelization const int nworkers, const int workerid, // basis and cutoff table data for fragment const int maxlqn, const int shel_atm_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const int leading_cs_pair_frg[], //const double csp_schwarz_frg[], const int csp_ics_frg[], const int csp_jcs_frg[], const int csp_leading_ps_pair_frg[], const double psp_zeta_frg[], const double psp_dkps_frg[], const double psp_xiza_frg[], // basis set data for monomer const int leading_cs_mon[], const int shel_tem_mon[], const int shel_atm_mon[], const int shel_add_mon[], const int shel_ini_mon[], const double atom_x_mon[], const double atom_y_mon[], const double 
atom_z_mon[],
        const double prim_exp_mon[], const double prim_coe_mon[],
        // monomer AO population
        const double ao_pop_mon[],
        // (output) Coulomb potential
        double V_frg[] ) {
    int La, Lb, Lc, Lab, Labc;
    // added for load-balancing
    int offset, local_id=workerid, mythread;
    // int type, pos;
    int ma[] = {0, 1, 1, 2, 2, 2,};   /* not referenced in this function */
    int mb[] = {0, 0, 1, 0, 1, 2,};   /* not referenced in this function */
    // added for load-balancing
    mythread = omp_get_thread_num();
    //type = ofmo_integ_get_target_type( mythread );
    // no dynamic load balancing: type = -1;
    /* Resume the round-robin worker rotation from the persisted offset */
    offset = ofmo_integ_get_loop_offset( mythread );
    local_id = (offset+workerid)%nworkers;
    /* Loop over integral types (ab|c): fragment CS-pair type times
     * monomer shell type; Labc indexes calc_ifc3c[]. */
    for ( La=0; La<=maxlqn; La++ ) {
        for ( Lb=0; Lb<=La; Lb++ ) {
            Lab = La*(La+1)/2 + Lb;
            for ( Lc=0; Lc<=maxlqn; Lc++ ) {
                Labc = 3*Lab + Lc;
                calc_ifc3c[Labc](
                        &nworkers, &local_id,
                        &La, &Lb, &Lc,
                        shel_atm_frg, shel_ini_frg,
                        atom_x_frg, atom_y_frg, atom_z_frg,
                        leading_cs_pair_frg,
                        csp_ics_frg, csp_jcs_frg,
                        csp_leading_ps_pair_frg,
                        psp_zeta_frg, psp_dkps_frg, psp_xiza_frg,
                        leading_cs_mon,
                        shel_tem_mon, shel_atm_mon, shel_add_mon, shel_ini_mon,
                        atom_x_mon, atom_y_mon, atom_z_mon,
                        prim_exp_mon, prim_coe_mon,
                        ao_pop_mon, V_frg );
                // added for load-balancing
                offset += (leading_cs_pair_frg[Lab+1] - leading_cs_pair_frg[Lab]);
                local_id = (offset+workerid)%nworkers;
            }
        }
    }
    ofmo_integ_set_loop_offset( mythread, offset );
    return 0;
}

/* Dispatch table of 2-center Coulomb integral kernels, indexed by
 * Lab = La*(La+1)/2 + Lb. */
static int (*calc_ifc2c[]) (
        // parallelization
        const int *nworkers, const int *workerid,
        // integral type data
        const int *pLa, const int *pLb,
        // basis set data for fragment
        const int leading_cs_frg[],
        const int shel_tem_frg[], const int shel_atm_frg[],
        const int shel_add_frg[], const int shel_ini_frg[],
        const double atom_x_frg[], const double atom_y_frg[],
        const double atom_z_frg[],
        const double prim_exp_frg[], const double prim_coe_frg[],
        // atomic charge and atomic coordinate data for monomer
        const int *nat_mon,
        const double atom_x_mon[], const double atom_y_mon[],
        const double atom_z_mon[],
        const double atm_pop_mon[],
        // output
        double V_frg[] ) = {
    ofmo_ifc2c_ss__,
    ofmo_ifc2c_ps__,
ofmo_ifc2c_pp__, ofmo_ifc2c_ds__, ofmo_ifc2c_dp__, ofmo_ifc2c_dd__, }; /** 2中心クーロン相互作用項の計算を行う関数 * @ingroup integ-top * * FMO計算で現れる2中心クーロン相互作用項を計算する関数。 * 計算対象のフラグメントのソート基底関数と、相手モノマーのatomic * populationを与えると、2中心クーロン作用項が計算される。 * * @attention * @li スレッド並列実行を行う場合には、スレッド並列領域内から * この関数を呼び出す必要がある。 * @li \c nworkers と \c workerid を適切に設定すると、OpenMPとMPIの * ハイブリッド並列実行が可能である。ただし、MPI並列利用時には、 * 関数終了後に、\c MPI_Allreduce などを用いたリダクション処理を * 行うことで、完全なクーロン項が得られる * @li 得られるクーロン項 \c V_frg[] は、軌道量子数の大きさで * ソートされたものである。元の並びのクーロン項が欲しい場合には、 * 要素の並べ替えが必要である。 * * @param[in] nworkers 計算に用いるワーカプロセス(スレッド)数 * @param[in] workerid 各ワーカプロセス(スレッド)のID。 * \f$ 0\le\tt{workerid}<\tt{nworkers} \f$である。 * @param[in] maxlqn 最大軌道量子数 * @param[in] leading_cs_frg[lqn] 対象フラグメントの、 * 軌道量子数 \c lqn の先頭CS番号 * @param[in] shel_tem_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSの縮約長 * @param[in] shel_atm_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSが属する原子の番号 * @param[in] shel_add_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSに含まれるPSの先頭PS番号 * @param[in] shel_ini_frg[ics] 対象フラグメントの、 * CS番号 \c ics のCSに含まれるAOの先頭AO番号 * @param[in] atom_x_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z_frg[iat] 対象フラグメントの、 * 原子の番号 \c iat のz座標(au単位) * @param[in] prim_exp_frg[ips] 対象フラグメントの、 * PS番号 \c ips のPSの軌道指数 * @param[in] prim_coe_frg[ips] 対象フラグメントの、 * PS番号 \c ips のPSの規格化定数込みの縮約係数 * * @param[in] nat_mon 相手モノマーの、原子数 * @param[in] atom_x_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のx座標(au単位) * @param[in] atom_y_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のy座標(au単位) * @param[in] atom_z_mon[iat] 相手モノマーの、 * 原子の番号 \c iat のz座標(au単位) * @param[in] atm_pop_mon[iat] 相手モノマーの、 * 原子の番号 \c iat の原子番号 * * @param[out] V_frg[] 対象フラグメントにおける * 相手モノマーとの間の2中心クーロン相互作用項 * (G行列、圧縮"U"形式)。この配列は、同一プロセス内のスレッド * 間で共有である。 * * @retval 0 正常終了 * @retval -1 異常終了(いま(2011/06/13)のところ考えていない) * * */ int ofmo_integ_ifc2c_sorted_partial( // parallelization const int nworkers, const int workerid, // input data of fragment const int maxlqn, const int 
leading_cs_frg[], const int shel_tem_frg[], const int shel_atm_frg[], const int shel_add_frg[], const int shel_ini_frg[], const double atom_x_frg[], const double atom_y_frg[], const double atom_z_frg[], const double prim_exp_frg[], const double prim_coe_frg[], // input data of counter monomer const int nat_mon, const double atom_x_mon[], const double atom_y_mon[], const double atom_z_mon[], const double atm_pop_mon[], // output data double V_frg[] ) { int La, Lb, Lab; int type, mythread; int ma[] = { 0, 1, 1, 2, 2, 2, }; int mb[] = { 0, 0, 1, 0, 1, 2, }; mythread = omp_get_thread_num(); //type = ofmo_integ_get_target_type( mythread ); type = -1; for ( La=0; La<=maxlqn; La++ ) { for ( Lb=0; Lb<=La; Lb++ ) { Lab = La*(La+1)/2 + Lb; calc_ifc2c[Lab]( &nworkers, &workerid, &La, &Lb, leading_cs_frg, shel_tem_frg, shel_atm_frg, shel_add_frg, shel_ini_frg, atom_x_frg, atom_y_frg, atom_z_frg, prim_exp_frg, prim_coe_frg, &nat_mon, atom_x_mon, atom_y_mon, atom_z_mon, atm_pop_mon, V_frg ); } } return 0; }
sumavectoresparallel.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Number of elements in each vector. */
#define TAM 8

/*
 * Adds two float vectors element-wise (c = a + b) with OpenMP: each of
 * the 4 threads takes the indices congruent to its thread id modulo the
 * number of threads, then the result vector is printed.
 *
 * Returns 0 on success, EXIT_FAILURE if an allocation fails.
 */
int main()
{
    float *a, *b, *c;
    size_t memsize = sizeof(float) * TAM; /* compute the size once */

    a = (float *)malloc(memsize);
    b = (float *)malloc(memsize);
    c = (float *)malloc(memsize);
    if (a == NULL || b == NULL || c == NULL) { /* robustness: check malloc */
        fprintf(stderr, "malloc failed\n");
        free(a);
        free(b);
        free(c);
        return EXIT_FAILURE;
    }

    for (int i = 0; i < TAM; ++i)
        a[i] = b[i] = 1.0f; /* 1.0f keeps the literal in single precision */

    omp_set_num_threads(4);
#pragma omp parallel
    {
        /* round-robin distribution: thread t handles i = t, t+nthreads, ... */
        for (int i = omp_get_thread_num(); i < TAM; i += omp_get_num_threads()) {
            printf("Soy el hilo %i y accedo a %d\n", omp_get_thread_num(), i);
            c[i] = a[i] + b[i];
        }
    }

    printf("Resultado del tercer vector, c: \n");
    for (int i = 0; i < TAM; ++i)
        printf("%f, ", c[i]);
    printf("\n");

    /* release the vectors (the original leaked them) */
    free(a);
    free(b);
    free(c);
    return 0;
}
otcalc.c
/****************************************************************************** * Python extension module for calculating Lyman-alpha optical depths. * * Function calc_optdepth() takes arguments: * n_HI in [m^-3 (proper)], sightline neutral hydrogen density, * v_Hub in [km/s], sightline Hubble velocity, * v_pec in [km/s], sightline gas peculiar velocity, * b in [m/s], sightline Doppler parameter for Voigt profile, * dx in [m (proper)], sightline spatial bin width(s), * nlos, number of sightlines, * nbins, number of bins in each sightline, * nthreads, number of OpenMP threads to use in calculation, * * where n_HI, v_Hub, v_pec, b and dx are 1D NumPy arrays (of dtype np.float64). * nlos, nbins and nthreads are Python integers. * * It returns an array of Lyman-alpha optical depths. * * LHW 04/12/17 * updated 26/04/17 *****************************************************************************/ #include <math.h> #include <omp.h> #include "Python.h" #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include "numpy/arrayobject.h" /* Constants for Voigt profile */ #define PI 3.14159265358979323846 /* Dimensionless */ #define LAMBDA_LYA_H1 1.2156701e-7 /* Lya wavelength in m */ #define FOSC_LYA 0.416400 /* Dimensionless */ #define ECHARGE 1.60217662e-19 /* Coulombs */ #define EMASS 9.10938356e-31 /* kg */ #define C 2.99792458e8 /* m/s */ #define EPSILON0 8.85418782e-12 /* m^-3 kg^-1 s^4 A^2 */ #define GAMMA_LYA_H1 6.265e8 /* Lya decay rate in s^-1 */ /* Prototypes */ void sighandler(int sig); double voigt_profile (double vdiff, double b); static PyObject* calc_optdepth(PyObject *self, PyObject *args); /*---------------------------------------------------------------------------*/ /* MODULE FUNCTIONS ---------------------------------------------------------*/ /* Signal handler to allow interrupts when extension in use */ void sighandler(int sig) { fprintf(stderr,"\nSignal = %s (SIGID = %d). 
\n",strsignal(sig), sig); exit(sig); } /* Voigt profile function */ double voigt_profile (double vdiff, double b) { /* Takes relative velocities in [km/s] and b in [m/s] */ /* Returns profile in [m^2 s] */ double sigma_av_Lya, k2, k3, a; double T0, T1, T2; double numer, subfrac, profile; /* Convert units */ vdiff *= 1e3; /* km/s to m/s */ /* Average cross section */ sigma_av_Lya = FOSC_LYA*ECHARGE*ECHARGE/(4*EMASS*C*EPSILON0); /* m^2 */ /* Profile factors */ k2 = GAMMA_LYA_H1*LAMBDA_LYA_H1/(4.0*PI); /* m/s */ k3 = sigma_av_Lya*LAMBDA_LYA_H1/(sqrt(PI)*b); /* m^2 s */ a = k2/b; /* dimensionless */ T0 = (vdiff/b) * (vdiff/b); T1 = exp(-T0); T2 = 1.5/T0; numer = ( T1*T1*(4.0*T0*T0 + 7.0*T0 + 4.0 + T2) - T2 - 1.0 ); subfrac = a/sqrt(PI)/T0*numer; profile = (T0 < 1.0e-6) ? T1 : T1 - subfrac; return k3*profile; /* m^2 s */ } /* Python callable function */ static PyObject* calc_optdepth(PyObject *self, PyObject *args) { PyArrayObject *arr1=NULL, *arr2=NULL, *arr3=NULL, *arr4=NULL, *arr5=NULL; PyObject *val1=NULL, *val2=NULL, *val3=NULL; PyArrayObject *out_arr=NULL; int ndims, i; long nlos, nbins, nthreads; npy_float64 *n_HI, *v_Hub, *v_pec, *b, *dx; npy_float64 *tau_lya=NULL, *out_ptr; /* Signal handler to allow interrupts */ signal(SIGINT,sighandler); /******************************************************************************/ /* Parse arguments and copy Python variables into C variables */ printf("\nOTCALC running...\n"); if (!PyArg_ParseTuple(args, "O!O!O!O!O!O!O!O!", &PyArray_Type, &arr1, &PyArray_Type, &arr2, &PyArray_Type, &arr3, &PyArray_Type, &arr4, &PyArray_Type, &arr5, &PyLong_Type, &val1, &PyLong_Type, &val2, &PyLong_Type, &val3)) { return NULL; } /* Array dimensions */ ndims = PyArray_DIM(arr1,0); if (PyArray_DIM(arr2,0) != ndims || PyArray_DIM(arr3,0) != ndims || PyArray_DIM(arr4,0) != ndims || PyArray_DIM(arr5,0) != ndims) { PyErr_SetString(PyExc_ValueError, "Input array dimensions don't match"); return NULL; } nlos = PyLong_AsLong(val1); nbins = 
PyLong_AsLong(val2); if (nbins*nlos != ndims) { PyErr_SetString(PyExc_ValueError, "ndims != nlos*nbins"); return NULL; } printf("Loaded nbins = %ld, nlos = %ld\n",nbins,nlos); nthreads = PyLong_AsLong(val3); omp_set_num_threads(nthreads); printf("Using max. of %d threads for calculation\n", omp_get_max_threads()); /* Allocate memory for C arrays */ n_HI = (npy_float64 *)malloc(sizeof(npy_float64)*ndims); if (n_HI==NULL) { free(n_HI); printf("malloc error!\n"); exit(0); } v_Hub = (npy_float64 *)malloc(sizeof(npy_float64)*ndims); if (v_Hub==NULL) { free(v_Hub); printf("malloc error!\n"); exit(0); } v_pec = (npy_float64 *)malloc(sizeof(npy_float64)*ndims); if (v_pec==NULL) { free(v_pec); printf("malloc error!\n"); exit(0); } b = (npy_float64 *)malloc(sizeof(npy_float64)*ndims); if (b==NULL) { free(b); printf("malloc error!\n"); exit(0); } dx = (npy_float64 *)malloc(sizeof(npy_float64)*ndims); if (dx==NULL) { free(dx); printf("malloc error!\n"); exit(0); } /* Copy Python arrays into C arrays */ for(i=0; i<ndims; i++) { n_HI[i] = *(npy_float64 *)PyArray_GETPTR1(arr1, i); v_Hub[i] = *(npy_float64 *)PyArray_GETPTR1(arr2, i); v_pec[i] = *(npy_float64 *)PyArray_GETPTR1(arr3, i); b[i] = *(npy_float64 *)PyArray_GETPTR1(arr4, i); dx[i] = *(npy_float64 *)PyArray_GETPTR1(arr5, i); } /* Sanity prints */ printf("\nSome sanity prints [proper units]:\n"); printf("n_HI[0] = %lf [m^-3]\n", n_HI[0]); printf("v_Hub[0] = %lf [km/s]\n", v_Hub[0]); printf("v_pec[0] = %lf [km/s]\n", v_pec[0]); printf("b[0] = %lf [m/s]\n", b[0]); printf("dx[0] = %lf [m]\n\n", dx[0]); /******************************************************************************/ /* Optical depth calculation */ tau_lya = (npy_float64 *)calloc(ndims,sizeof(npy_float64)); if (tau_lya==NULL) { free(tau_lya); printf("calloc error!\n"); exit(0); } #pragma omp parallel { /* private (thread-local) variables */ int j, k, inj, ink; npy_float64 v_rel, vp; #pragma omp for private(i) for (i=0; i<nlos; i++) { /* index ordering is 
(nbins*i + j) */ for (j=0; j<nbins; j++) { inj = i*nbins + j; for (k=0; k<nbins; k++) { ink = i*nbins + k; v_rel = v_Hub[inj] - v_Hub[ink] - v_pec[ink]; /* relative velocity in km/s */ vp = voigt_profile(v_rel, b[ink]); tau_lya[inj] += vp*n_HI[ink]*dx[ink]; /* UNITS: all proper with [vp] = m^2, [n_HI] = m^-3, [dx] = m */ } } } } /* Create output array */ out_arr = PyArray_FromDims(1, &ndims, NPY_DOUBLE); /* Copy optical depths into output array */ for (i=0; i<ndims; i++) { out_ptr = (npy_float64 *)PyArray_GETPTR1(out_arr, i); *out_ptr = tau_lya[i]; } /* Deallocate memory for C arrays */ free(n_HI); free(v_Hub); free(v_pec); free(b); free(dx); free(tau_lya); return PyArray_Return(out_arr); } /*---------------------------------------------------------------------------*/ /* MODULE INITIALIZATION ----------------------------------------------------*/ /* Note module designed for use with Python 3! */ static PyMethodDef otcalc_methods[] = { {"calc_optdepth", calc_optdepth, METH_VARARGS, ""}, {NULL, NULL, 0, NULL} /* Sentinel */ }; static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "otcalc", NULL, -1, otcalc_methods, NULL, NULL, NULL, NULL, }; PyMODINIT_FUNC *PyInit_otcalc(void) { PyObject *m; m = PyModule_Create(&moduledef); if (m == NULL) { return NULL; } import_array(); return m; }
convolution_7x7.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON

/* 7x7 convolution, stride 1.
 * For each output channel p (parallelized with OpenMP), the output is
 * initialized with the bias and then every input channel q contributes a
 * 7x7 correlation with its 49-element kernel slice.  Per output row, 4
 * output pixels are computed at a time: NEON intrinsics on aarch64, hand
 * written inline asm on 32-bit ARM, and a scalar loop for the tail
 * (and for non-NEON builds).
 * NOTE(review): outptr2 and r7 are set up but never used in this
 * stride-1 variant — they look like leftovers from a 2-rows-at-a-time
 * scheme; confirm before removing. */
static void conv7x7s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;
            float* outptr2 = out + outw;

            const float* img0 = bottom_blob.channel(q);

            /* kernel layout: outch x inch x 49 contiguous floats */
            const float* kernel0 = kernel + p*inch*49 + q*49;

            /* seven consecutive input rows feeding one output row */
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;
            const float* r4 = img0 + w*4;
            const float* r5 = img0 + w*5;
            const float* r6 = img0 + w*6;
            const float* r7 = img0 + w*7;

            /* the seven kernel rows */
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 7;
            const float* k2 = kernel0 + 14;
            const float* k3 = kernel0 + 21;
            const float* k4 = kernel0 + 28;
            const float* k5 = kernel0 + 35;
            const float* k6 = kernel0 + 42;

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                /* nn = vectorized groups of 4 pixels, remain = scalar tail */
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                for (; nn>0; nn--)
                {
                    float32x4_t _sum = vld1q_f32(outptr);

                    float32x4_t _k0123 = vld1q_f32(k0);
                    float32x4_t _k4567 = vld1q_f32(k0 + 4);

                    /* row 0: load 12 pixels, build the 7 shifted windows */
                    float32x4_t _r00 = vld1q_f32(r0);// 0 1 2 3
                    float32x4_t _r04 = vld1q_f32(r0 + 4);// 4 5 6 7
                    float32x4_t _r00n = vld1q_f32(r0 + 8);// 8 9 10 11
                    float32x4_t _r01 = vextq_f32(_r00, _r04, 1);// 1 2 3 4
                    float32x4_t _r02 = vextq_f32(_r00, _r04, 2);// 2 3 4 5
                    float32x4_t _r03 = vextq_f32(_r00, _r04, 3);// 3 4 5 6
                    float32x4_t _r05 = vextq_f32(_r04, _r00n, 1);// 5 6 7 8
                    float32x4_t _r06 = vextq_f32(_r04, _r00n, 2);// 6 7 8 9

                    _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r05, _k4567, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2);

                    /* row 1 */
                    float32x4_t _k78910 = vld1q_f32(k1);
                    float32x4_t _k11121314 = vld1q_f32(k1 + 4);

                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r14 = vld1q_f32(r1 + 4);
                    float32x4_t _r10n = vld1q_f32(r1 + 8);
                    float32x4_t _r11 = vextq_f32(_r10, _r14, 1);
                    float32x4_t _r12 = vextq_f32(_r10, _r14, 2);
                    float32x4_t _r13 = vextq_f32(_r10, _r14, 3);
                    float32x4_t _r15 = vextq_f32(_r14, _r10n, 1);
                    float32x4_t _r16 = vextq_f32(_r14, _r10n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2);

                    /* row 2 */
                    float32x4_t _k14151617 = vld1q_f32(k2);
                    float32x4_t _k18192021 = vld1q_f32(k2 + 4);

                    float32x4_t _r20 = vld1q_f32(r2);
                    float32x4_t _r24 = vld1q_f32(r2 + 4);
                    float32x4_t _r20n = vld1q_f32(r2 + 8);
                    float32x4_t _r21 = vextq_f32(_r20, _r24, 1);
                    float32x4_t _r22 = vextq_f32(_r20, _r24, 2);
                    float32x4_t _r23 = vextq_f32(_r20, _r24, 3);
                    float32x4_t _r25 = vextq_f32(_r24, _r20n, 1);
                    float32x4_t _r26 = vextq_f32(_r24, _r20n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2);

                    /* row 3 */
                    float32x4_t _k21222324 = vld1q_f32(k3);
                    float32x4_t _k25262728 = vld1q_f32(k3 + 4);

                    float32x4_t _r30 = vld1q_f32(r3);
                    float32x4_t _r34 = vld1q_f32(r3 + 4);
                    float32x4_t _r30n = vld1q_f32(r3 + 8);
                    float32x4_t _r31 = vextq_f32(_r30, _r34, 1);
                    float32x4_t _r32 = vextq_f32(_r30, _r34, 2);
                    float32x4_t _r33 = vextq_f32(_r30, _r34, 3);
                    float32x4_t _r35 = vextq_f32(_r34, _r30n, 1);
                    float32x4_t _r36 = vextq_f32(_r34, _r30n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2);

                    /* row 4 */
                    float32x4_t _k28293031 = vld1q_f32(k4);
                    float32x4_t _k32333435 = vld1q_f32(k4 + 4);

                    float32x4_t _r40 = vld1q_f32(r4);
                    float32x4_t _r44 = vld1q_f32(r4 + 4);
                    float32x4_t _r40n = vld1q_f32(r4 + 8);
                    float32x4_t _r41 = vextq_f32(_r40, _r44, 1);
                    float32x4_t _r42 = vextq_f32(_r40, _r44, 2);
                    float32x4_t _r43 = vextq_f32(_r40, _r44, 3);
                    float32x4_t _r45 = vextq_f32(_r44, _r40n, 1);
                    float32x4_t _r46 = vextq_f32(_r44, _r40n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2);

                    /* row 5 */
                    float32x4_t _k35363738 = vld1q_f32(k5);
                    float32x4_t _k39404142 = vld1q_f32(k5 + 4);

                    float32x4_t _r50 = vld1q_f32(r5);
                    float32x4_t _r54 = vld1q_f32(r5 + 4);
                    float32x4_t _r50n = vld1q_f32(r5 + 8);
                    float32x4_t _r51 = vextq_f32(_r50, _r54, 1);
                    float32x4_t _r52 = vextq_f32(_r50, _r54, 2);
                    float32x4_t _r53 = vextq_f32(_r50, _r54, 3);
                    float32x4_t _r55 = vextq_f32(_r54, _r50n, 1);
                    float32x4_t _r56 = vextq_f32(_r54, _r50n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2);

                    /* row 6 */
                    float32x4_t _k42434445 = vld1q_f32(k6);
                    float32x4_t _k46474849 = vld1q_f32(k6 + 4);

                    float32x4_t _r60 = vld1q_f32(r6);
                    float32x4_t _r64 = vld1q_f32(r6 + 4);
                    float32x4_t _r60n = vld1q_f32(r6 + 8);
                    float32x4_t _r61 = vextq_f32(_r60, _r64, 1);
                    float32x4_t _r62 = vextq_f32(_r60, _r64, 2);
                    float32x4_t _r63 = vextq_f32(_r60, _r64, 3);
                    float32x4_t _r65 = vextq_f32(_r64, _r60n, 1);
                    float32x4_t _r66 = vextq_f32(_r64, _r60n, 2);

                    _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2);
                    _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3);
                    _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0);
                    _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1);
                    _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2);

                    vst1q_f32(outptr, _sum);

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                    r6 += 4;
                    outptr += 4;
                }
#else
                /* 32-bit ARM: same 4-pixels-at-a-time scheme in inline asm.
                 * Four accumulators q12-q15 are summed at the end; %9 (k0)
                 * walks the 7 kernel rows and is rewound by 168 bytes
                 * (6 rows * 28) at the last row. */
                if (nn > 0)
                {
                asm volatile(
                    "0: \n"
                    "pld [%1, #256] \n"
                    "vld1.f32 {d24-d25}, [%1] \n"// _sum
                    "veor q13, q13 \n"// _sum2 = 0;
                    "veor q14, q14 \n"// _sum3 = 0;
                    "veor q15, q15 \n"// _sum4 = 0;

                    "pld [%9, #256] \n"
                    "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567
                    "add %9, #28 \n"

                    "pld [%2, #128] \n"
                    "vld1.f32 {d0-d1}, [%2]! \n"// q0 = 0 1 2 3
                    "vmla.f32 q12, q0, d8[0] \n"
                    "pld [%2, #256] \n"
                    "vld1.f32 {d4-d7}, [%2] \n"// q2 = 4 5 6 7 q3 = 8 9 10 11
                    "vmla.f32 q13, q2, d10[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"// q1 = 1 2 3 4
                    "vext.32 q10, q2, q3, #1 \n"// q10= 5 6 7 8
                    "vmla.f32 q14, q1, d8[1] \n"
                    "vmla.f32 q15, q10, d10[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"// q8 = 2 3 4 5
                    "vext.32 q11, q2, q3, #2 \n"// q11= 6 7 8 9
                    "vmla.f32 q12, q8, d9[0] \n"
                    "vmla.f32 q13, q11, d11[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"// q9 = 3 4 5 6
                    "vmla.f32 q14, q9, d9[1] \n"

                    "pld [%9, #256] \n"
                    "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314
                    "add %9, #28 \n"

                    "pld [%3, #128] \n"
                    "vld1.f32 {d0-d1}, [%3]! \n"
                    "vmla.f32 q15, q0, d12[0] \n"
                    "pld [%3, #256] \n"
                    "vld1.f32 {d4-d7}, [%3] \n"
                    "vmla.f32 q12, q2, d14[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"
                    "vext.32 q10, q2, q3, #1 \n"
                    "vmla.f32 q13, q1, d12[1] \n"
                    "vmla.f32 q14, q10, d14[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"
                    "vext.32 q11, q2, q3, #2 \n"
                    "vmla.f32 q15, q8, d13[0] \n"
                    "vmla.f32 q12, q11, d15[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"
                    "vmla.f32 q13, q9, d13[1] \n"

                    "pld [%9, #256] \n"
                    "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021
                    "add %9, #28 \n"

                    "pld [%4, #128] \n"
                    "vld1.f32 {d0-d1}, [%4]! \n"
                    "vmla.f32 q14, q0, d8[0] \n"
                    "pld [%4, #256] \n"
                    "vld1.f32 {d4-d7}, [%4] \n"
                    "vmla.f32 q15, q2, d10[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"
                    "vext.32 q10, q2, q3, #1 \n"
                    "vmla.f32 q12, q1, d8[1] \n"
                    "vmla.f32 q13, q10, d10[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"
                    "vext.32 q11, q2, q3, #2 \n"
                    "vmla.f32 q14, q8, d9[0] \n"
                    "vmla.f32 q15, q11, d11[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"
                    "vmla.f32 q12, q9, d9[1] \n"

                    "pld [%9, #256] \n"
                    "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728
                    "add %9, #28 \n"

                    "pld [%5, #128] \n"
                    "vld1.f32 {d0-d1}, [%5]! \n"
                    "vmla.f32 q13, q0, d12[0] \n"
                    "pld [%5, #256] \n"
                    "vld1.f32 {d4-d7}, [%5] \n"
                    "vmla.f32 q14, q2, d14[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"
                    "vext.32 q10, q2, q3, #1 \n"
                    "vmla.f32 q15, q1, d12[1] \n"
                    "vmla.f32 q12, q10, d14[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"
                    "vext.32 q11, q2, q3, #2 \n"
                    "vmla.f32 q13, q8, d13[0] \n"
                    "vmla.f32 q14, q11, d15[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"
                    "vmla.f32 q15, q9, d13[1] \n"

                    "pld [%9, #256] \n"
                    "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435
                    "add %9, #28 \n"

                    "pld [%6, #128] \n"
                    "vld1.f32 {d0-d1}, [%6]! \n"
                    "vmla.f32 q12, q0, d8[0] \n"
                    "pld [%6, #256] \n"
                    "vld1.f32 {d4-d7}, [%6] \n"
                    "vmla.f32 q13, q2, d10[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"
                    "vext.32 q10, q2, q3, #1 \n"
                    "vmla.f32 q14, q1, d8[1] \n"
                    "vmla.f32 q15, q10, d10[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"
                    "vext.32 q11, q2, q3, #2 \n"
                    "vmla.f32 q12, q8, d9[0] \n"
                    "vmla.f32 q13, q11, d11[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"
                    "vmla.f32 q14, q9, d9[1] \n"

                    "pld [%9, #256] \n"
                    "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142
                    "add %9, #28 \n"

                    "pld [%7, #128] \n"
                    "vld1.f32 {d0-d1}, [%7]! \n"
                    "vmla.f32 q15, q0, d12[0] \n"
                    "pld [%7, #256] \n"
                    "vld1.f32 {d4-d7}, [%7] \n"
                    "vmla.f32 q12, q2, d14[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"
                    "vext.32 q10, q2, q3, #1 \n"
                    "vmla.f32 q13, q1, d12[1] \n"
                    "vmla.f32 q14, q10, d14[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"
                    "vext.32 q11, q2, q3, #2 \n"
                    "vmla.f32 q15, q8, d13[0] \n"
                    "vmla.f32 q12, q11, d15[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"
                    "vmla.f32 q13, q9, d13[1] \n"

                    "pld [%9, #256] \n"
                    "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849
                    "sub %9, #168 \n"// restore k0

                    "pld [%8, #128] \n"
                    "vld1.f32 {d0-d1}, [%8]! \n"
                    "vmla.f32 q14, q0, d8[0] \n"
                    "pld [%8, #256] \n"
                    "vld1.f32 {d4-d7}, [%8] \n"
                    "vmla.f32 q15, q2, d10[0] \n"
                    "vext.32 q1, q0, q2, #1 \n"
                    "vext.32 q10, q2, q3, #1 \n"
                    "vmla.f32 q12, q1, d8[1] \n"
                    "vmla.f32 q13, q10, d10[1] \n"
                    "vext.32 q8, q0, q2, #2 \n"
                    "vext.32 q11, q2, q3, #2 \n"
                    "vmla.f32 q14, q8, d9[0] \n"
                    "vmla.f32 q15, q11, d11[0] \n"
                    "vext.32 q9, q0, q2, #3 \n"
                    "vmla.f32 q12, q9, d9[1] \n"

                    "vadd.f32 q13, q13, q14 \n"
                    "vadd.f32 q13, q13, q15 \n"
                    "vadd.f32 q12, q12, q13 \n"

                    "vst1.f32 {d24-d25}, [%1]! \n"

                    "subs %0, #1 \n"
                    "bne 0b \n"
                    : "=r"(nn), // %0
                      "=r"(outptr), // %1
                      "=r"(r0), // %2
                      "=r"(r1), // %3
                      "=r"(r2), // %4
                      "=r"(r3), // %5
                      "=r"(r4), // %6
                      "=r"(r5), // %7
                      "=r"(r6), // %8
                      "=r"(k0) // %9
                    : "0"(nn),
                      "1"(outptr),
                      "2"(r0),
                      "3"(r1),
                      "4"(r2),
                      "5"(r3),
                      "6"(r4),
                      "7"(r5),
                      "8"(r6),
                      "9"(k0)
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                /* scalar tail: full 7x7 dot product for one output pixel */
                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];
                    sum += r0[4] * k0[4];
                    sum += r0[5] * k0[5];
                    sum += r0[6] * k0[6];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];
                    sum += r1[4] * k1[4];
                    sum += r1[5] * k1[5];
                    sum += r1[6] * k1[6];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];
                    sum += r2[4] * k2[4];
                    sum += r2[5] * k2[5];
                    sum += r2[6] * k2[6];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];
                    sum += r3[4] * k3[4];
                    sum += r3[5] * k3[5];
                    sum += r3[6] * k3[6];

                    sum += r4[0] * k4[0];
                    sum += r4[1] * k4[1];
                    sum += r4[2] * k4[2];
                    sum += r4[3] * k4[3];
                    sum += r4[4] * k4[4];
                    sum += r4[5] * k4[5];
                    sum += r4[6] * k4[6];

                    sum += r5[0] * k5[0];
                    sum += r5[1] * k5[1];
                    sum += r5[2] * k5[2];
                    sum += r5[3] * k5[3];
                    sum += r5[4] * k5[4];
                    sum += r5[5] * k5[5];
                    sum += r5[6] * k5[6];

                    sum += r6[0] * k6[0];
                    sum += r6[1] * k6[1];
                    sum += r6[2] * k6[2];
                    sum += r6[3] * k6[3];
                    sum += r6[4] * k6[4];
                    sum += r6[5] * k6[5];
                    sum += r6[6] * k6[6];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    r4++;
                    r5++;
                    r6++;
                    outptr++;
                }

                /* skip the 6-pixel right border of each input row */
                r0 += 6;
                r1 += 6;
                r2 += 6;
                r3 += 6;
                r4 += 6;
                r5 += 6;
                r6 += 6;
            }
        }
    }
}

/* 7x7 convolution, stride 2 (definition continues beyond this chunk). */
static void conv7x7s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int tailstep = w - 
2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = out + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*49 + q*49; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* r4 = img0 + w*4; const float* r5 = img0 + w*5; const float* r6 = img0 + w*6; const float* k0 = kernel0; const float* k1 = kernel0 + 7; const float* k2 = kernel0 + 14; const float* k3 = kernel0 + 21; const float* k4 = kernel0 + 28; const float* k5 = kernel0 + 35; const float* k6 = kernel0 + 42; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw - (nn << 2); #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ for (; nn>0; nn--) { float32x4_t _sum = vld1q_f32(outptr); float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k4567 = vld1q_f32(k0 + 4); float32x4x2_t _r00_02461357 = vld2q_f32(r0); float32x4x2_t _r00nx2 = vld2q_f32(r0 + 8); float32x4_t _r0_8101214 = _r00nx2.val[0];// 8 10 12 14 float32x4_t _r0_9111315 = _r00nx2.val[1];// 9 11 13 15 float32x4_t _r00 = _r00_02461357.val[0];// 0 2 4 6 float32x4_t _r01 = _r00_02461357.val[1];// 1 3 5 7 float32x4_t _r02 = vextq_f32(_r00, _r0_8101214, 1);// 2 4 6 8 float32x4_t _r03 = vextq_f32(_r01, _r0_9111315, 1);// 3 5 7 9 float32x4_t _r04 = vextq_f32(_r00, _r0_8101214, 2);// 4 6 8 10 float32x4_t _r05 = vextq_f32(_r01, _r0_9111315, 2);// 5 7 9 11 float32x4_t _r06 = vextq_f32(_r00, _r0_8101214, 3);// 6 8 10 12 _sum = vfmaq_laneq_f32(_sum, _r00, _k0123, 0); _sum = vfmaq_laneq_f32(_sum, _r01, _k0123, 1); _sum = vfmaq_laneq_f32(_sum, _r02, _k0123, 2); _sum = vfmaq_laneq_f32(_sum, _r03, _k0123, 3); _sum = vfmaq_laneq_f32(_sum, _r04, _k4567, 0); _sum = vfmaq_laneq_f32(_sum, 
_r05, _k4567, 1); _sum = vfmaq_laneq_f32(_sum, _r06, _k4567, 2); float32x4_t _k78910 = vld1q_f32(k1); float32x4_t _k11121314 = vld1q_f32(k1 + 4); float32x4x2_t _r10_02461357 = vld2q_f32(r1); float32x4x2_t _r10nx2 = vld2q_f32(r1 + 8); float32x4_t _r1_8101214 = _r10nx2.val[0]; float32x4_t _r1_9111315 = _r10nx2.val[1]; float32x4_t _r10 = _r10_02461357.val[0]; float32x4_t _r11 = _r10_02461357.val[1]; float32x4_t _r12 = vextq_f32(_r10, _r1_8101214, 1); float32x4_t _r13 = vextq_f32(_r11, _r1_9111315, 1); float32x4_t _r14 = vextq_f32(_r10, _r1_8101214, 2); float32x4_t _r15 = vextq_f32(_r11, _r1_9111315, 2); float32x4_t _r16 = vextq_f32(_r10, _r1_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r10, _k78910, 0); _sum = vfmaq_laneq_f32(_sum, _r11, _k78910, 1); _sum = vfmaq_laneq_f32(_sum, _r12, _k78910, 2); _sum = vfmaq_laneq_f32(_sum, _r13, _k78910, 3); _sum = vfmaq_laneq_f32(_sum, _r14, _k11121314, 0); _sum = vfmaq_laneq_f32(_sum, _r15, _k11121314, 1); _sum = vfmaq_laneq_f32(_sum, _r16, _k11121314, 2); float32x4_t _k14151617 = vld1q_f32(k2); float32x4_t _k18192021 = vld1q_f32(k2 + 4); float32x4x2_t _r20_02461357 = vld2q_f32(r2); float32x4x2_t _r20nx2 = vld2q_f32(r2 + 8); float32x4_t _r2_8101214 = _r20nx2.val[0]; float32x4_t _r2_9111315 = _r20nx2.val[1]; float32x4_t _r20 = _r20_02461357.val[0]; float32x4_t _r21 = _r20_02461357.val[1]; float32x4_t _r22 = vextq_f32(_r20, _r2_8101214, 1); float32x4_t _r23 = vextq_f32(_r21, _r2_9111315, 1); float32x4_t _r24 = vextq_f32(_r20, _r2_8101214, 2); float32x4_t _r25 = vextq_f32(_r21, _r2_9111315, 2); float32x4_t _r26 = vextq_f32(_r20, _r2_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r20, _k14151617, 0); _sum = vfmaq_laneq_f32(_sum, _r21, _k14151617, 1); _sum = vfmaq_laneq_f32(_sum, _r22, _k14151617, 2); _sum = vfmaq_laneq_f32(_sum, _r23, _k14151617, 3); _sum = vfmaq_laneq_f32(_sum, _r24, _k18192021, 0); _sum = vfmaq_laneq_f32(_sum, _r25, _k18192021, 1); _sum = vfmaq_laneq_f32(_sum, _r26, _k18192021, 2); float32x4_t _k21222324 = vld1q_f32(k3); 
float32x4_t _k25262728 = vld1q_f32(k3 + 4); float32x4x2_t _r30_02461357 = vld2q_f32(r3); float32x4x2_t _r30nx2 = vld2q_f32(r3 + 8); float32x4_t _r3_8101214 = _r30nx2.val[0]; float32x4_t _r3_9111315 = _r30nx2.val[1]; float32x4_t _r30 = _r30_02461357.val[0]; float32x4_t _r31 = _r30_02461357.val[1]; float32x4_t _r32 = vextq_f32(_r30, _r3_8101214, 1); float32x4_t _r33 = vextq_f32(_r31, _r3_9111315, 1); float32x4_t _r34 = vextq_f32(_r30, _r3_8101214, 2); float32x4_t _r35 = vextq_f32(_r31, _r3_9111315, 2); float32x4_t _r36 = vextq_f32(_r30, _r3_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r30, _k21222324, 0); _sum = vfmaq_laneq_f32(_sum, _r31, _k21222324, 1); _sum = vfmaq_laneq_f32(_sum, _r32, _k21222324, 2); _sum = vfmaq_laneq_f32(_sum, _r33, _k21222324, 3); _sum = vfmaq_laneq_f32(_sum, _r34, _k25262728, 0); _sum = vfmaq_laneq_f32(_sum, _r35, _k25262728, 1); _sum = vfmaq_laneq_f32(_sum, _r36, _k25262728, 2); float32x4_t _k28293031 = vld1q_f32(k4); float32x4_t _k32333435 = vld1q_f32(k4 + 4); float32x4x2_t _r40_02461357 = vld2q_f32(r4); float32x4x2_t _r40nx2 = vld2q_f32(r4 + 8); float32x4_t _r4_8101214 = _r40nx2.val[0]; float32x4_t _r4_9111315 = _r40nx2.val[1]; float32x4_t _r40 = _r40_02461357.val[0]; float32x4_t _r41 = _r40_02461357.val[1]; float32x4_t _r42 = vextq_f32(_r40, _r4_8101214, 1); float32x4_t _r43 = vextq_f32(_r41, _r4_9111315, 1); float32x4_t _r44 = vextq_f32(_r40, _r4_8101214, 2); float32x4_t _r45 = vextq_f32(_r41, _r4_9111315, 2); float32x4_t _r46 = vextq_f32(_r40, _r4_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r40, _k28293031, 0); _sum = vfmaq_laneq_f32(_sum, _r41, _k28293031, 1); _sum = vfmaq_laneq_f32(_sum, _r42, _k28293031, 2); _sum = vfmaq_laneq_f32(_sum, _r43, _k28293031, 3); _sum = vfmaq_laneq_f32(_sum, _r44, _k32333435, 0); _sum = vfmaq_laneq_f32(_sum, _r45, _k32333435, 1); _sum = vfmaq_laneq_f32(_sum, _r46, _k32333435, 2); float32x4_t _k35363738 = vld1q_f32(k5); float32x4_t _k39404142 = vld1q_f32(k5 + 4); float32x4x2_t _r50_02461357 = vld2q_f32(r5); 
float32x4x2_t _r50nx2 = vld2q_f32(r5 + 8); float32x4_t _r5_8101214 = _r50nx2.val[0]; float32x4_t _r5_9111315 = _r50nx2.val[1]; float32x4_t _r50 = _r50_02461357.val[0]; float32x4_t _r51 = _r50_02461357.val[1]; float32x4_t _r52 = vextq_f32(_r50, _r5_8101214, 1); float32x4_t _r53 = vextq_f32(_r51, _r5_9111315, 1); float32x4_t _r54 = vextq_f32(_r50, _r5_8101214, 2); float32x4_t _r55 = vextq_f32(_r51, _r5_9111315, 2); float32x4_t _r56 = vextq_f32(_r50, _r5_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r50, _k35363738, 0); _sum = vfmaq_laneq_f32(_sum, _r51, _k35363738, 1); _sum = vfmaq_laneq_f32(_sum, _r52, _k35363738, 2); _sum = vfmaq_laneq_f32(_sum, _r53, _k35363738, 3); _sum = vfmaq_laneq_f32(_sum, _r54, _k39404142, 0); _sum = vfmaq_laneq_f32(_sum, _r55, _k39404142, 1); _sum = vfmaq_laneq_f32(_sum, _r56, _k39404142, 2); float32x4_t _k42434445 = vld1q_f32(k6); float32x4_t _k46474849 = vld1q_f32(k6 + 4); float32x4x2_t _r60_02461357 = vld2q_f32(r6); float32x4x2_t _r60nx2 = vld2q_f32(r6 + 8); float32x4_t _r6_8101214 = _r60nx2.val[0]; float32x4_t _r6_9111315 = _r60nx2.val[1]; float32x4_t _r60 = _r60_02461357.val[0]; float32x4_t _r61 = _r60_02461357.val[1]; float32x4_t _r62 = vextq_f32(_r60, _r6_8101214, 1); float32x4_t _r63 = vextq_f32(_r61, _r6_9111315, 1); float32x4_t _r64 = vextq_f32(_r60, _r6_8101214, 2); float32x4_t _r65 = vextq_f32(_r61, _r6_9111315, 2); float32x4_t _r66 = vextq_f32(_r60, _r6_8101214, 3); _sum = vfmaq_laneq_f32(_sum, _r60, _k42434445, 0); _sum = vfmaq_laneq_f32(_sum, _r61, _k42434445, 1); _sum = vfmaq_laneq_f32(_sum, _r62, _k42434445, 2); _sum = vfmaq_laneq_f32(_sum, _r63, _k42434445, 3); _sum = vfmaq_laneq_f32(_sum, _r64, _k46474849, 0); _sum = vfmaq_laneq_f32(_sum, _r65, _k46474849, 1); _sum = vfmaq_laneq_f32(_sum, _r66, _k46474849, 2); vst1q_f32(outptr, _sum); r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; r6 += 8; outptr += 4; } #else if (nn > 0) { asm volatile( "0: \n" "pld [%1, #256] \n" "vld1.f32 {d26-d27}, [%1] \n"// _sum "veor q14, q14 
\n"// _sum2 = 0; "veor q15, q15 \n"// _sum3 = 0; "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k0123 k4567 "add %9, #28 \n" "pld [%2, #512] \n" "vld2.f32 {d0-d3}, [%2]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7 "vmla.f32 q13, q0, d8[0] \n" "vmla.f32 q14, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%2] \n"// q2 = 8 10 12 14 q3 = 9 11 13 15 "vext.32 q8, q0, q2, #1 \n"// q8 = 2 4 6 8 "vext.32 q9, q1, q3, #1 \n"// q9 = 3 5 7 9 "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q13, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n"// q10= 4 6 8 10 "vext.32 q11, q1, q3, #2 \n"// q11= 5 7 9 11 "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n"// q12= 6 8 10 12 "vmla.f32 q13, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k78910 k11121314 "add %9, #28 \n" "pld [%3, #512] \n" "vld2.f32 {d0-d3}, [%3]! \n" "vmla.f32 q14, q0, d12[0] \n" "vmla.f32 q15, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%3] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q13, q8, d13[0] \n" "vmla.f32 q14, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q15, q10, d14[0] \n" "vmla.f32 q13, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q14, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k14151617 k18192021 "add %9, #28 \n" "pld [%4, #512] \n" "vld2.f32 {d0-d3}, [%4]! \n" "vmla.f32 q15, q0, d8[0] \n" "vmla.f32 q13, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%4] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q14, q8, d9[0] \n" "vmla.f32 q15, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q13, q10, d10[0] \n" "vmla.f32 q14, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q15, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k21222324 k25262728 "add %9, #28 \n" "pld [%5, #512] \n" "vld2.f32 {d0-d3}, [%5]! 
\n" "vmla.f32 q13, q0, d12[0] \n" "vmla.f32 q14, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%5] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q15, q8, d13[0] \n" "vmla.f32 q13, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q14, q10, d14[0] \n" "vmla.f32 q15, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q13, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k28293031 k32333435 "add %9, #28 \n" "pld [%6, #512] \n" "vld2.f32 {d0-d3}, [%6]! \n" "vmla.f32 q14, q0, d8[0] \n" "vmla.f32 q15, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%6] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q13, q8, d9[0] \n" "vmla.f32 q14, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q15, q10, d10[0] \n" "vmla.f32 q13, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q14, q12, d11[0] \n" "pld [%9, #256] \n" "vld1.f32 {d12-d15}, [%9] \n"// q6 q7 = k35363738 k39404142 "add %9, #28 \n" "pld [%7, #512] \n" "vld2.f32 {d0-d3}, [%7]! \n" "vmla.f32 q15, q0, d12[0] \n" "vmla.f32 q13, q1, d12[1] \n" "vld2.f32 {d4-d7}, [%7] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q14, q8, d13[0] \n" "vmla.f32 q15, q9, d13[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q13, q10, d14[0] \n" "vmla.f32 q14, q11, d14[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q15, q12, d15[0] \n" "pld [%9, #256] \n" "vld1.f32 {d8-d11}, [%9] \n"// q4 q5 = k42434445 k46474849 "sub %9, #168 \n"// restore k0 "pld [%8, #512] \n" "vld2.f32 {d0-d3}, [%8]! 
\n" "vmla.f32 q13, q0, d8[0] \n" "vmla.f32 q14, q1, d8[1] \n" "vld2.f32 {d4-d7}, [%8] \n" "vext.32 q8, q0, q2, #1 \n" "vext.32 q9, q1, q3, #1 \n" "vmla.f32 q15, q8, d9[0] \n" "vmla.f32 q13, q9, d9[1] \n" "vext.32 q10, q0, q2, #2 \n" "vext.32 q11, q1, q3, #2 \n" "vmla.f32 q14, q10, d10[0] \n" "vmla.f32 q15, q11, d10[1] \n" "vext.32 q12, q0, q2, #3 \n" "vmla.f32 q13, q12, d11[0] \n" "vadd.f32 q14, q14, q15 \n" "vadd.f32 q13, q13, q14 \n" "vst1.f32 {d26-d27}, [%1]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5), // %7 "=r"(r6), // %8 "=r"(k0) // %9 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "8"(r6), "9"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r0[5] * k0[5]; sum += r0[6] * k0[6]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r1[5] * k1[5]; sum += r1[6] * k1[6]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r2[5] * k2[5]; sum += r2[6] * k2[6]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r3[5] * k3[5]; sum += r3[6] * k3[6]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum += r4[5] * k4[5]; sum += r4[6] * k4[6]; sum += r5[0] * k5[0]; sum += r5[1] * k5[1]; sum += r5[2] * k5[2]; sum += r5[3] * k5[3]; sum += r5[4] * k5[4]; sum += r5[5] * k5[5]; sum += r5[6] * k5[6]; sum += r6[0] * k6[0]; sum += r6[1] * k6[1]; sum += r6[2] * k6[2]; sum += r6[3] * k6[3]; sum += r6[4] * 
k6[4]; sum += r6[5] * k6[5]; sum += r6[6] * k6[6]; *outptr += sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; r5 += 2; r6 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } }
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <vector> #include <utility> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { class BinaryScalarOp : public UnaryOp { /*! 
 \brief Tensor operation against a scalar with a dense result
 *
 * Row-sparse (RSP) input -> dense output.  Rows stored in the sparse input
 * get OP(value, alpha) applied elementwise; rows that are absent represent
 * zeros and are filled with the precomputed OP(0, alpha).
 */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
                                    const nnvm::NodeAttrs &attrs,
                                    const OpContext &ctx,
                                    const NDArray &input,
                                    const OpReqType req,
                                    const NDArray &output) {
  // The scalar operand is stored (parsed as a double) in the node attributes.
  const double alpha = nnvm::get<double>(attrs.parsed);
  CHECK_EQ(output.shape(), input.shape());
  const int64_t row_count = output.shape()[0];
  const int64_t items_per_row = output.shape().Size() / row_count;
  // Value that every implicit-zero element maps to.
  const DType result_for_zero = OP::Map(DType(0), DType(alpha));
  mshadow::Tensor<cpu, 1, DType> input_data =
    input.data().FlatTo1D<cpu, DType>(stream);
  mshadow::Tensor<cpu, 1, DType> output_data =
    output.data().FlatTo1D<cpu, DType>(stream);
  // Number of rows actually stored in the row-sparse input.
  const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
  if (sparse_row_count != row_count) {
    // Some rows are missing: walk the output rows, alternating between runs
    // of absent rows (dense fill) and runs of contiguous stored rows.
    mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
      rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
    int64_t input_iter = 0;   // index into the stored (sparse) rows
    int64_t output_row = 0;   // next output row to produce
    // NOTE(review): next_input_row is an IType but is assigned int64_t
    // values below -- assumes IType can represent row_count; confirm.
    IType next_input_row = 0;
    while (output_row < row_count) {
      // Row index of the next stored row, or row_count once exhausted.
      next_input_row = input_iter < sparse_row_count ?
        int64_t(row_indexes[input_iter]) : row_count;
      // Split up into blocks of contiguous data and do those together
      // Do contiguous dense blocks
      const int64_t dense_block_count = next_input_row - output_row;
      if (dense_block_count > 0) {
        // Absent rows: broadcast the precomputed OP(0, alpha) value.
        MXNET_ASSIGN_REQ_SWITCH(req, Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
            stream,
            items_per_row * dense_block_count,
            output_data.dptr_ + items_per_row * output_row,
            result_for_zero);
        });
        output_row += dense_block_count;
        continue;
      }
      // Do contiguous sparse blocks
      int64_t next_non_contiguous_sparse = input_iter;
      while (next_non_contiguous_sparse < sparse_row_count - 1) {
        if (row_indexes[next_non_contiguous_sparse + 1]
            != row_indexes[next_non_contiguous_sparse] + 1) {
          break;
        }
        ++next_non_contiguous_sparse;
      }
      const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
      if (sparse_block_count > 0) {
        // Stored rows: apply OP(value, alpha) across the whole contiguous run
        // in a single kernel launch.
        MXNET_ASSIGN_REQ_SWITCH(req, Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
            stream,
            items_per_row * sparse_block_count,
            &output_data.dptr_[items_per_row * output_row],
            &input_data.dptr_[items_per_row * input_iter],
            DType(alpha));
        });
        output_row += sparse_block_count;
        input_iter += sparse_block_count;
        continue;
      }
    }
  } else {
    // All rows exist (eventually we don't have to do complex
    // things to call GPU kernels because we don't need to access row indices)
    MXNET_ASSIGN_REQ_SWITCH(req, Req, {
      mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
        stream,
        items_per_row * row_count,
        output_data.dptr_,
        input_data.dptr_,
        DType(alpha));
    });
  }
}

/*! \brief Tensor operation against a scalar with a dense result
 *         (GPU overload -- not implemented; aborts at runtime). */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
                                    const nnvm::NodeAttrs &attrs,
                                    const OpContext &ctx,
                                    const NDArray &input,
                                    const OpReqType req,
                                    const NDArray &output) {
  LOG(FATAL) << "NOT IMPLEMENTED";
}

/*!
\brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { CHECK_EQ(output.shape(), input.shape()); const double alpha = nnvm::get<double>(attrs.parsed); const DType dense_fill_val = OP::Map(DType(0), DType(alpha)); const TBlob column_indexes = input.aux_data(csr::kIdx); const size_t item_count = column_indexes.Size(); // Pre-fill dense with 0-input/output value FillDense<DType>(stream, output.shape().Size(), dense_fill_val, req, output.data().dptr<DType>()); mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data()); if (item_count) { const DType *in = input.data().dptr<DType>(); const IType *column_indexes_ptr = column_indexes.dptr<IType>(); const auto row_count = static_cast<size_t>(input.shape()[0]); const TBlob row_starts = input.aux_data(csr::kIndPtr); const CType *row_starts_ptr = row_starts.dptr<CType>(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(row_count); ++i) { const bool last_row = i == static_cast<int>(row_count) - 1; // Split up into blocks of contiguous data and do those together const size_t row_item_start_iter = row_starts_ptr[i]; const size_t input_items_this_row = !last_row ? 
static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter : item_count - row_item_start_iter; if (input_items_this_row) { const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter; const DType *row_data_start = in + row_item_start_iter; DType *output_this_row = out[i].dptr_; // More overhead to use OMP for small loops, so don't if (input_items_this_row > 1000) { #pragma omp parallel for for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } else { for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) { const IType col = this_row_column_indexes[j]; const DType val = row_data_start[j]; output_this_row[col] = OP::Map(val, DType(alpha)); } } } } } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType, typename CType> static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } template<typename xpu, typename OP, typename DType, typename IType> static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray output) { mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>(); CHECK_EQ(output.storage_type(), kDefaultStorage); switch (input.storage_type()) { case kRowSparseStorage: { ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output); break; } case kCSRStorage: { MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, { ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output); }); break; } default: CHECK(false) << "Unsupported sparse storage type"; break; } } public: template<typename xpu, typename OP> 
  /*! \brief Dense forward: out[i] = OP(in[i], alpha) for every element.
   *  Dispatches over all floating/real dtypes of the output blob. */
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // Scalar operand, stored (as a double) in the parsed attributes.
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(),
          DType(alpha));
      });
    });
  }

  /*! \brief Same as Compute() but dispatches over integer dtypes only. */
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(),
          DType(alpha));
      });
    });
  }

  /*! \brief Comparison-style forward: writes a boolean output blob.
   *  Dispatches on the *input* dtype (bool included); output is bool. */
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<bool>(), inputs[0].dptr<DType>(),
          DType(alpha));
      });
    });
  }

  /*! \brief Sparse-aware forward.  Same-storage in/out (csr->csr, rsp->rsp)
   *  maps straight onto the dense kernel; sparse-in/dense-out goes through
   *  ComputeExDenseResult.  Anything else is logged as unimplemented. */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage &&
               (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        // NOTE(review): rowsparse::kIdx is used to pick IType even when the
        // input is CSR -- this assumes both storage formats use the same
        // index dtype at that aux slot; confirm.
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0],
                                                      req[0], outputs[0]);
        });
      });
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Sparse-aware forward for logic ops: only same-storage
   *  in/out pairs are supported; everything else logs unimplemented. */
  template<typename xpu, typename OP>
  static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<NDArray> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Backward pass: in_grad[i] = backward_grad_tuned<OP>(out_grad[i],
   *  forward_input[i], alpha).  inputs[0] is the output gradient and
   *  inputs[1] the forward input. */
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
          mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>::
          Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(),
                 inputs[1].dptr<DType>(), DType(alpha));
      });
    });
  }
};

/*! \brief Register a (tensor, scalar) -> tensor operator with the standard
 *  one-input/one-output attributes; the scalar is parsed from the "scalar"
 *  dict entry into attrs->parsed as a double. */
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                 \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(1)                                                \
  .set_num_outputs(1)                                               \
  .set_attr_parser([](NodeAttrs* attrs) {                           \
      attrs->parsed = std::stod(attrs->dict["scalar"]);             \
    })                                                              \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)     \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}};             \
    })                                                              \
  .add_argument("data", "NDArray-or-Symbol", "source input")        \
  .add_argument("scalar", "float", "scalar input")

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
parser.c
/* C++ Parser. Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc. Written by Mark Mitchell <mark@codesourcery.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "dyn-string.h" #include "varray.h" #include "cpplib.h" #include "tree.h" #include "cp-tree.h" #include "c-pragma.h" #include "decl.h" #include "flags.h" #include "diagnostic.h" #include "toplev.h" #include "output.h" #include "target.h" /* APPLE LOCAL 4133801 */ #include "debug.h" #include "cgraph.h" #include "c-common.h" /* APPLE LOCAL pascal strings */ #include "../../libcpp/internal.h" /* APPLE LOCAL C* language */ #include "tree-iterator.h" /* The lexer. */ /* The cp_lexer_* routines mediate between the lexer proper (in libcpp and c-lex.c) and the C++ parser. */ /* A token's value and its associated deferred access checks and qualifying scope. */ struct tree_check GTY(()) { /* The value associated with the token. */ tree value; /* The checks that have been associated with value. */ VEC (deferred_access_check, gc)* checks; /* The token's qualifying scope (used when it is a CPP_NESTED_NAME_SPECIFIER). */ tree qualifying_scope; }; /* A C++ token. */ typedef struct cp_token GTY (()) { /* The kind of token. 
*/ ENUM_BITFIELD (cpp_ttype) type : 8; /* If this token is a keyword, this value indicates which keyword. Otherwise, this value is RID_MAX. */ ENUM_BITFIELD (rid) keyword : 8; /* Token flags. */ unsigned char flags; /* Identifier for the pragma. */ ENUM_BITFIELD (pragma_kind) pragma_kind : 6; /* True if this token is from a system header. */ BOOL_BITFIELD in_system_header : 1; /* True if this token is from a context where it is implicitly extern "C" */ BOOL_BITFIELD implicit_extern_c : 1; /* True for a CPP_NAME token that is not a keyword (i.e., for which KEYWORD is RID_MAX) iff this name was looked up and found to be ambiguous. An error has already been reported. */ BOOL_BITFIELD ambiguous_p : 1; /* The input file stack index at which this token was found. */ unsigned input_file_stack_index : INPUT_FILE_STACK_BITS; /* The value associated with this token, if any. */ union cp_token_value { /* Used for CPP_NESTED_NAME_SPECIFIER and CPP_TEMPLATE_ID. */ struct tree_check* GTY((tag ("1"))) tree_check_value; /* Use for all other tokens. */ tree GTY((tag ("0"))) value; } GTY((desc ("(%1.type == CPP_TEMPLATE_ID) || (%1.type == CPP_NESTED_NAME_SPECIFIER)"))) u; /* The location at which this token was found. */ location_t location; } cp_token; /* We use a stack of token pointer for saving token sets. */ typedef struct cp_token *cp_token_position; DEF_VEC_P (cp_token_position); DEF_VEC_ALLOC_P (cp_token_position,heap); static const cp_token eof_token = { CPP_EOF, RID_MAX, 0, PRAGMA_NONE, 0, 0, false, 0, { NULL }, #if USE_MAPPED_LOCATION 0 #else {0, 0} #endif }; /* The cp_lexer structure represents the C++ lexer. It is responsible for managing the token stream from the preprocessor and supplying it to the parser. Tokens are never added to the cp_lexer after it is created. */ typedef struct cp_lexer GTY (()) { /* The memory allocated for the buffer. NULL if this lexer does not own the token buffer. 
*/ cp_token * GTY ((length ("%h.buffer_length"))) buffer; /* If the lexer owns the buffer, this is the number of tokens in the buffer. */ size_t buffer_length; /* A pointer just past the last available token. The tokens in this lexer are [buffer, last_token). */ cp_token_position GTY ((skip)) last_token; /* The next available token. If NEXT_TOKEN is &eof_token, then there are no more available tokens. */ cp_token_position GTY ((skip)) next_token; /* A stack indicating positions at which cp_lexer_save_tokens was called. The top entry is the most recent position at which we began saving tokens. If the stack is non-empty, we are saving tokens. */ VEC(cp_token_position,heap) *GTY ((skip)) saved_tokens; /* The next lexer in a linked list of lexers. */ struct cp_lexer *next; /* True if we should output debugging information. */ bool debugging_p; /* True if we're in the context of parsing a pragma, and should not increment past the end-of-line marker. */ bool in_pragma; } cp_lexer; /* cp_token_cache is a range of tokens. There is no need to represent allocate heap memory for it, since tokens are never removed from the lexer's array. There is also no need for the GC to walk through a cp_token_cache, since everything in here is referenced through a lexer. */ typedef struct cp_token_cache GTY(()) { /* The beginning of the token range. */ cp_token * GTY((skip)) first; /* Points immediately after the last token in the range. */ cp_token * GTY ((skip)) last; } cp_token_cache; /* APPLE LOCAL begin C* language */ /* APPLE LOCAL radar 5130983 */ int lvalue_or_else (tree*, enum lvalue_use); static void objc_finish_foreach_stmt (tree); /* APPLE LOCAL end C* language */ /* Prototypes. 
*/ static cp_lexer *cp_lexer_new_main (void); static cp_lexer *cp_lexer_new_from_tokens (cp_token_cache *tokens); static void cp_lexer_destroy (cp_lexer *); static int cp_lexer_saving_tokens (const cp_lexer *); static cp_token_position cp_lexer_token_position (cp_lexer *, bool); static cp_token *cp_lexer_token_at (cp_lexer *, cp_token_position); static void cp_lexer_get_preprocessor_token (cp_lexer *, cp_token *); static inline cp_token *cp_lexer_peek_token (cp_lexer *); static cp_token *cp_lexer_peek_nth_token (cp_lexer *, size_t); static inline bool cp_lexer_next_token_is (cp_lexer *, enum cpp_ttype); static bool cp_lexer_next_token_is_not (cp_lexer *, enum cpp_ttype); static bool cp_lexer_next_token_is_keyword (cp_lexer *, enum rid); static cp_token *cp_lexer_consume_token (cp_lexer *); static void cp_lexer_purge_token (cp_lexer *); static void cp_lexer_purge_tokens_after (cp_lexer *, cp_token_position); static void cp_lexer_save_tokens (cp_lexer *); static void cp_lexer_commit_tokens (cp_lexer *); static void cp_lexer_rollback_tokens (cp_lexer *); #ifdef ENABLE_CHECKING static void cp_lexer_print_token (FILE *, cp_token *); static inline bool cp_lexer_debugging_p (cp_lexer *); static void cp_lexer_start_debugging (cp_lexer *) ATTRIBUTE_UNUSED; static void cp_lexer_stop_debugging (cp_lexer *) ATTRIBUTE_UNUSED; #else /* If we define cp_lexer_debug_stream to NULL it will provoke warnings about passing NULL to functions that require non-NULL arguments (fputs, fprintf). It will never be used, so all we need is a value of the right type that's guaranteed not to be NULL. */ #define cp_lexer_debug_stream stdout #define cp_lexer_print_token(str, tok) (void) 0 #define cp_lexer_debugging_p(lexer) 0 #endif /* ENABLE_CHECKING */ static cp_token_cache *cp_token_cache_new (cp_token *, cp_token *); static void cp_parser_initial_pragma (cp_token *); /* Manifest constants. 
*/

/* Initial token-buffer size and initial depth of the saved-token stack.  */
#define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token))
#define CP_SAVED_TOKEN_STACK 5

/* A token type for keywords, as opposed to ordinary identifiers.  */
#define CPP_KEYWORD ((enum cpp_ttype) (N_TTYPES + 1))

/* A token type for template-ids.  If a template-id is processed while
   parsing tentatively, it is replaced with a CPP_TEMPLATE_ID token;
   the value of the CPP_TEMPLATE_ID is whatever was returned by
   cp_parser_template_id.  */
#define CPP_TEMPLATE_ID ((enum cpp_ttype) (CPP_KEYWORD + 1))

/* A token type for nested-name-specifiers.  If a
   nested-name-specifier is processed while parsing tentatively, it is
   replaced with a CPP_NESTED_NAME_SPECIFIER token; the value of the
   CPP_NESTED_NAME_SPECIFIER is whatever was returned by
   cp_parser_nested_name_specifier_opt.  */
#define CPP_NESTED_NAME_SPECIFIER ((enum cpp_ttype) (CPP_TEMPLATE_ID + 1))

/* A token type for tokens that are not tokens at all; these are used
   to represent slots in the array where there used to be a token
   that has now been deleted.  */
#define CPP_PURGED ((enum cpp_ttype) (CPP_NESTED_NAME_SPECIFIER + 1))

/* The number of token types, including C++-specific ones.  */
#define N_CP_TTYPES ((int) (CPP_PURGED + 1))

/* Variables.  */

#ifdef ENABLE_CHECKING
/* The stream to which debugging output should be written.  */
static FILE *cp_lexer_debug_stream;
#endif /* ENABLE_CHECKING */

/* Create a new main C++ lexer, the lexer that gets tokens from the
   preprocessor.  The entire token stream is read up front into a
   growable GC-allocated buffer.  */

static cp_lexer *
cp_lexer_new_main (void)
{
  cp_token first_token;
  cp_lexer *lexer;
  cp_token *pos;
  size_t alloc;
  size_t space;
  cp_token *buffer;

  /* APPLE LOCAL begin 4137741 */
  /* Tell cpplib we want CPP_BINCL and CPP_EINCL tokens.  */
  cpp_get_options (parse_in)->defer_file_change_debug_hooks = true;
  /* APPLE LOCAL end 4137741 */
  /* It's possible that parsing the first pragma will load a PCH file,
     which is a GC collection point.  So we have to do that before
     allocating any memory.  */
  cp_parser_initial_pragma (&first_token);
  /* APPLE LOCAL begin 4137741 */
  /* Forward any leading begin/end-include markers to the debug hooks
     and skip past them to the first real token.  */
  while (first_token.type == CPP_BINCL || first_token.type == CPP_EINCL)
    {
      if (first_token.type == CPP_BINCL)
        (*debug_hooks->start_source_file) (TREE_INT_CST_LOW (first_token.u.value),
                                           first_token.location.file);
      else
        (*debug_hooks->end_source_file) (TREE_INT_CST_LOW (first_token.u.value));
      cp_lexer_get_preprocessor_token (NULL, &first_token);
    }
  /* APPLE LOCAL end 4137741 */

  /* Tell c_lex_with_flags not to merge string constants.  */
  c_lex_return_raw_strings = true;

  c_common_no_more_pch ();

  /* Allocate the memory.  */
  lexer = GGC_CNEW (cp_lexer);

#ifdef ENABLE_CHECKING
  /* Initially we are not debugging.  */
  lexer->debugging_p = false;
#endif /* ENABLE_CHECKING */
  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);

  /* Create the buffer.  */
  alloc = CP_LEXER_BUFFER_SIZE;
  buffer = GGC_NEWVEC (cp_token, alloc);

  /* Put the first token in the buffer.  */
  space = alloc;
  pos = buffer;
  *pos = first_token;

  /* Get the remaining tokens from the preprocessor.  */
  while (pos->type != CPP_EOF)
    {
      pos++;
      if (!--space)
        {
          /* Buffer full: double it.  SPACE becomes the count of newly
             added slots, so BUFFER + SPACE is the first free slot (the
             old contents occupy the first half of the new buffer).  */
          space = alloc;
          alloc *= 2;
          buffer = GGC_RESIZEVEC (cp_token, buffer, alloc);
          pos = buffer + space;
        }
      cp_lexer_get_preprocessor_token (lexer, pos);
    }
  lexer->buffer = buffer;
  lexer->buffer_length = alloc - space;
  lexer->last_token = pos;
  /* An empty buffer means the stream was nothing but EOF.  */
  lexer->next_token = lexer->buffer_length ? buffer : (cp_token *)&eof_token;

  /* Subsequent preprocessor diagnostics should use compiler
     diagnostic functions to get the compiler source location.  */
  cpp_get_options (parse_in)->client_diagnostic = true;
  cpp_get_callbacks (parse_in)->error = cp_cpp_error;

  gcc_assert (lexer->next_token->type != CPP_PURGED);
  return lexer;
}

/* Create a new lexer whose token stream is primed with the tokens in
   CACHE.  When these tokens are exhausted, no new tokens will be
   read.
*/

static cp_lexer *
cp_lexer_new_from_tokens (cp_token_cache *cache)
{
  cp_token *first = cache->first;
  cp_token *last = cache->last;
  cp_lexer *lexer = GGC_CNEW (cp_lexer);

  /* We do not own the buffer.  */
  lexer->buffer = NULL;
  lexer->buffer_length = 0;
  /* An empty cache starts out already at end-of-file.  */
  lexer->next_token = first == last ? (cp_token *)&eof_token : first;
  lexer->last_token = last;

  lexer->saved_tokens = VEC_alloc (cp_token_position, heap,
                                   CP_SAVED_TOKEN_STACK);

#ifdef ENABLE_CHECKING
  /* Initially we are not debugging.  */
  lexer->debugging_p = false;
#endif

  gcc_assert (lexer->next_token->type != CPP_PURGED);
  return lexer;
}

/* Frees all resources associated with LEXER.  */

static void
cp_lexer_destroy (cp_lexer *lexer)
{
  /* BUFFER is NULL for cache-backed lexers; only the main lexer owns
     its token array.  */
  if (lexer->buffer)
    ggc_free (lexer->buffer);
  VEC_free (cp_token_position, heap, lexer->saved_tokens);
  ggc_free (lexer);
}

/* Returns nonzero if debugging information should be output.  */

#ifdef ENABLE_CHECKING

static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
  return lexer->debugging_p;
}

#endif /* ENABLE_CHECKING */

/* Return the position of the next (PREVIOUS_P false) or previous
   (PREVIOUS_P true) token; going backwards is only valid while the
   next token is not the EOF sentinel.  */

static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
  gcc_assert (!previous_p || lexer->next_token != &eof_token);

  /* PREVIOUS_P converts to 0 or 1 in the pointer arithmetic.  */
  return lexer->next_token - previous_p;
}

static inline cp_token *
cp_lexer_token_at (cp_lexer *lexer ATTRIBUTE_UNUSED, cp_token_position pos)
{
  return pos;
}

/* nonzero if we are presently saving tokens.  */

static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
  return VEC_length (cp_token_position, lexer->saved_tokens) != 0;
}

/* Store the next token from the preprocessor in *TOKEN, classifying
   it (keyword, Objective-C++ '@' keyword, pragma) as we go.  LEXER is
   unused.  */

static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer ATTRIBUTE_UNUSED ,
                                 cp_token *token)
{
  /* Depth of implicit extern "C" nesting; static so it persists
     across calls as header files are entered and left.  */
  static int is_extern_c = 0;

  /* Get a new token from the preprocessor.  */
  token->type
    /* APPLE LOCAL CW asm blocks C++ comments 6338079 */
    = c_lex_with_flags (&token->u.value, &token->location, &token->flags, 1);
  token->input_file_stack_index = input_file_stack_tick;
  token->keyword = RID_MAX;
  token->pragma_kind = PRAGMA_NONE;
  token->in_system_header = in_system_header;

  /* On some systems, some header files are surrounded by an
     implicit extern "C" block.  Set a flag in the token if it
     comes from such a header.  */
  is_extern_c += pending_lang_change;
  pending_lang_change = 0;
  token->implicit_extern_c = is_extern_c > 0;

  /* Check to see if this token is a keyword.  */
  if (token->type == CPP_NAME)
    {
      if (C_IS_RESERVED_WORD (token->u.value))
        {
          /* Mark this token as a keyword.  */
          token->type = CPP_KEYWORD;
          /* Record which keyword.  */
          token->keyword = C_RID_CODE (token->u.value);
          /* Update the value.  Some keywords are mapped to particular
             entities, rather than simply having the value of the
             corresponding IDENTIFIER_NODE.  For example, `__const' is
             mapped to `const'.  */
          token->u.value = ridpointers[token->keyword];
        }
      else
        {
          token->ambiguous_p = false;
          token->keyword = RID_MAX;
        }
    }
  /* Handle Objective-C++ keywords.  */
  else if (token->type == CPP_AT_NAME)
    {
      token->type = CPP_KEYWORD;
      switch (C_RID_CODE (token->u.value))
        {
          /* Map 'class' to '@class', 'private' to '@private', etc.  */
        case RID_CLASS: token->keyword = RID_AT_CLASS; break;
          /* APPLE LOCAL radar 4564694 */
        case RID_AT_PACKAGE: token->keyword = RID_AT_PACKAGE; break;
        case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break;
        case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
        case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break;
        case RID_THROW: token->keyword = RID_AT_THROW; break;
        case RID_TRY: token->keyword = RID_AT_TRY; break;
        case RID_CATCH: token->keyword = RID_AT_CATCH; break;
        default: token->keyword = C_RID_CODE (token->u.value);
        }
    }
  else if (token->type == CPP_PRAGMA)
    {
      /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST.
*/
      /* Unpack it and drop the INTEGER_CST wrapper.  */
      token->pragma_kind = TREE_INT_CST_LOW (token->u.value);
      token->u.value = NULL_TREE;
    }
}

/* Update the globals input_location and in_system_header and the input file stack from TOKEN. */

static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
  /* The EOF sentinel carries no meaningful location.  */
  if (token->type != CPP_EOF)
    {
      input_location = token->location;
      in_system_header = token->in_system_header;
      restore_input_file_stack (token->input_file_stack_index);
    }
}

/* APPLE LOCAL begin 4137741 */
/* Consume begin and end file marker tokens. */

static inline void
cp_lexer_consume_bincl_eincl_token (cp_lexer *lexer)
{
  /* Notify the debug hooks for each file-boundary marker, then purge
     it so the parser never sees it.  */
  while (lexer->next_token->type == CPP_BINCL
         || lexer->next_token->type == CPP_EINCL)
    {
      if (lexer->next_token->type == CPP_BINCL)
        (*debug_hooks->start_source_file)
          (TREE_INT_CST_LOW (lexer->next_token->u.value),
           lexer->next_token->location.file);
      else if (lexer->next_token->type == CPP_EINCL)
        (*debug_hooks->end_source_file)
          (TREE_INT_CST_LOW (lexer->next_token->u.value));
      cp_lexer_purge_token (lexer);
    }
}
/* APPLE LOCAL end 4137741 */

/* Return a pointer to the next token in the token stream, but do not consume it. */

static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
  /* APPLE LOCAL begin CW asm blocks */
 top:
  /* Under MS-style asm, integer-suffix errors were deferred at lex
     time; report them now that the token is actually consulted.  */
  if (flag_ms_asms)
    if (lexer->next_token->type == CPP_NUMBER
        && lexer->next_token->u.value == error_mark_node
        && (lexer->next_token->flags & ERROR_DEFERRED))
      {
        cp_lexer_set_source_position_from_token (lexer->next_token);
        /* This was previously deferred.
*/
        /* Clear the deferred flag so the error is emitted only once.  */
        lexer->next_token->flags ^= ERROR_DEFERRED;
        error ("invalid suffix on integer constant");
      }

  /* Outside an iasm block, tokens that only make sense to the
     preprocessor or to the assembler are diagnosed and skipped.  */
  if (!inside_iasm_block)
    {
      if (lexer->next_token->type == CPP_HASH)
        {
          cp_lexer_consume_token (lexer);
          error ("stray %qs in program", "#");
          goto top;
        }
      else if (lexer->next_token->type == CPP_PASTE)
        {
          cp_lexer_consume_token (lexer);
          error ("stray %qs in program", "##");
          goto top;
        }
      else if (lexer->next_token->type == CPP_OTHER)
        {
          tree value = lexer->next_token->u.value;
          int c;
          c = TREE_INT_CST_LOW (value);
          cp_lexer_consume_token (lexer);
          /* Pick the diagnostic that best matches the stray character.  */
          if (c == '"' || c == '\'')
            error ("missing terminating %c character", (int) c);
          else if (ISGRAPH (c))
            error ("stray %qc in program", (int) c);
          else
            error ("stray %<\\%o%> in program", (int) c);
          goto top;
        }
    }
  /* APPLE LOCAL end CW asm blocks */
  /* APPLE LOCAL 4137741 */
  cp_lexer_consume_bincl_eincl_token (lexer);
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token);
      putc ('\n', cp_lexer_debug_stream);
    }
  return lexer->next_token;
}

/* Return true if the next token has the indicated TYPE. */

static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type == type;
}

/* Return true if the next token does not have the indicated TYPE. */

static inline bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return !cp_lexer_next_token_is (lexer, type);
}

/* Return true if the next token is the indicated KEYWORD. */

static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
  return cp_lexer_peek_token (lexer)->keyword == keyword;
}

/* Return true if the next token is a keyword for a decl-specifier. */

static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
  cp_token *token;

  token = cp_lexer_peek_token (lexer);
  switch (token->keyword)
    {
      /* Storage classes.
    case RID_AUTO:
    case RID_REGISTER:
    case RID_STATIC:
    case RID_EXTERN:
    case RID_MUTABLE:
    case RID_THREAD:
      /* Elaborated type specifiers. */
    case RID_ENUM:
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
    case RID_TYPENAME:
      /* Simple type specifiers. */
    case RID_CHAR:
    case RID_WCHAR:
    case RID_BOOL:
    case RID_SHORT:
    case RID_INT:
    case RID_LONG:
    case RID_SIGNED:
    case RID_UNSIGNED:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
      /* GNU extensions. */
    case RID_ATTRIBUTE:
    case RID_TYPEOF:
      return true;

    default:
      return false;
    }
}

/* Return a pointer to the Nth token in the token stream. If N is 1, then this is precisely equivalent to cp_lexer_peek_token (except that it is not inline). One would like to disallow that case, but there is one case (cp_parser_nth_token_starts_template_id) where the caller passes a variable for N and it might be 1. */

static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
  cp_token *token;

  /* N is 1-based, not zero-based. */
  gcc_assert (n > 0);

  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream,
             "cp_lexer: peeking ahead %ld at token: ", (long)n);

  --n;
  token = lexer->next_token;
  gcc_assert (!n || token != &eof_token);
  /* Walk forward, counting only real tokens; purged tokens and
     file-boundary markers are skipped without decrementing N.  */
  while (n != 0)
    {
      ++token;
      if (token == lexer->last_token)
        {
          token = (cp_token *)&eof_token;
          break;
        }
      /* APPLE LOCAL begin 4137741 */
      if (token->type != CPP_PURGED && token->type != CPP_BINCL
          && token->type != CPP_EINCL)
      /* APPLE LOCAL end 4137741 */
        --n;
    }

  if (cp_lexer_debugging_p (lexer))
    {
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}

/* Return the next token, and advance the lexer's next_token pointer to point to the next non-purged token.
*/

static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
  cp_token *token = lexer->next_token;

  gcc_assert (token != &eof_token);
  /* A pragma's tokens must be consumed with the pragma machinery, not
     here, until its CPP_PRAGMA_EOL has been handled.  */
  gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);

  do
    {
      lexer->next_token++;
      /* APPLE LOCAL 4137741 */
      cp_lexer_consume_bincl_eincl_token (lexer);
      if (lexer->next_token == lexer->last_token)
        {
          lexer->next_token = (cp_token *)&eof_token;
          break;
        }
    }
  while (lexer->next_token->type == CPP_PURGED);

  cp_lexer_set_source_position_from_token (token);

  /* Provide debugging output. */
  if (cp_lexer_debugging_p (lexer))
    {
      fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
      cp_lexer_print_token (cp_lexer_debug_stream, token);
      putc ('\n', cp_lexer_debug_stream);
    }

  return token;
}

/* Permanently remove the next token from the token stream, and advance the next_token pointer to refer to the next non-purged token. */

static void
cp_lexer_purge_token (cp_lexer *lexer)
{
  cp_token *tok = lexer->next_token;

  gcc_assert (tok != &eof_token);
  /* Scrub the token's payload so nothing dangles from it.  */
  tok->type = CPP_PURGED;
  tok->location = UNKNOWN_LOCATION;
  tok->u.value = NULL_TREE;
  tok->keyword = RID_MAX;

  do
    {
      tok++;
      if (tok == lexer->last_token)
        {
          tok = (cp_token *)&eof_token;
          break;
        }
    }
  while (tok->type == CPP_PURGED);

  lexer->next_token = tok;
}

/* Permanently remove all tokens after TOK, up to, but not including, the token that will be returned next by cp_lexer_peek_token. */

static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
  cp_token *peek = lexer->next_token;

  if (peek == &eof_token)
    peek = lexer->last_token;

  gcc_assert (tok < peek);

  for (tok += 1; tok != peek; tok += 1)
    {
      tok->type = CPP_PURGED;
      tok->location = UNKNOWN_LOCATION;
      tok->u.value = NULL_TREE;
      tok->keyword = RID_MAX;
    }
}

/* Begin saving tokens. All tokens consumed after this point will be preserved. */

static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.
*/
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");

  /* Remember the current position; rollback will return here.  */
  VEC_safe_push (cp_token_position, heap,
                 lexer->saved_tokens, lexer->next_token);
}

/* Commit to the portion of the token stream most recently saved. */

static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
  /* Provide debugging output. */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");

  /* Discard the save point; the consumed tokens are now permanent.  */
  VEC_pop (cp_token_position, lexer->saved_tokens);
}

/* Return all tokens saved since the last call to cp_lexer_save_tokens to the token stream. Stop saving tokens. */

static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
  /* Provide debugging output. */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");

  /* Rewind to the most recent save point.  */
  lexer->next_token = VEC_pop (cp_token_position, lexer->saved_tokens);
}

/* Print a representation of the TOKEN on the STREAM. */

#ifdef ENABLE_CHECKING

static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
  /* We don't use cpp_type2name here because the parser defines
     a few tokens of its own. */
  static const char *const token_names[] = {
    /* cpplib-defined token types */
#define OP(e, s) #e,
#define TK(e, s) #e,
    TTYPE_TABLE
#undef OP
#undef TK
    /* C++ parser token types - see "Manifest constants", above. */
    "KEYWORD",
    "TEMPLATE_ID",
    "NESTED_NAME_SPECIFIER",
    "PURGED"
  };

  /* If we have a name for the token, print it out. Otherwise, we
     simply give the numeric code. */
  gcc_assert (token->type < ARRAY_SIZE(token_names));
  fputs (token_names[token->type], stream);

  /* For some tokens, print the associated data. */
  switch (token->type)
    {
    case CPP_KEYWORD:
      /* Some keywords have a value that is not an IDENTIFIER_NODE.
         For example, `struct' is mapped to an INTEGER_CST.
*/
      if (TREE_CODE (token->u.value) != IDENTIFIER_NODE)
        break;
      /* else fall through */
    case CPP_NAME:
      fputs (IDENTIFIER_POINTER (token->u.value), stream);
      break;

    case CPP_STRING:
    case CPP_WSTRING:
      fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
      break;

    default:
      break;
    }
}

/* Start emitting debugging information. */

static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = true;
}

/* Stop emitting debugging information. */

static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  lexer->debugging_p = false;
}

#endif /* ENABLE_CHECKING */

/* Create a new cp_token_cache, representing a range of tokens. */

static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
  cp_token_cache *cache = GGC_NEW (cp_token_cache);
  cache->first = first;
  cache->last = last;
  return cache;
}

/* Decl-specifiers. */

/* Set *DECL_SPECS to represent an empty decl-specifier-seq. */

static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  memset (decl_specs, 0, sizeof (cp_decl_specifier_seq));
}

/* Declarators. */

/* Nothing other than the parser should be creating declarators;
   declarators are a semi-syntactic representation of C++ entities.
   Other parts of the front end that need to create entities (like
   VAR_DECLs or FUNCTION_DECLs) should do that directly. */

static cp_declarator *make_call_declarator
  (cp_declarator *, cp_parameter_declarator *, cp_cv_quals, tree);
static cp_declarator *make_array_declarator
  (cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
  (cp_cv_quals, cp_declarator *);
static cp_declarator *make_reference_declarator
  (cp_cv_quals, cp_declarator *);
static cp_parameter_declarator *make_parameter_declarator
  (cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
  (cp_cv_quals, tree, cp_declarator *);

/* An erroneous declarator. */

static cp_declarator *cp_error_declarator;

/* The obstack on which declarators and related data structures are allocated.
*/

static struct obstack declarator_obstack;

/* Alloc BYTES from the declarator memory pool. */

static inline void *
alloc_declarator (size_t bytes)
{
  return obstack_alloc (&declarator_obstack, bytes);
}

/* Allocate a declarator of the indicated KIND. Clear fields that are common to all declarators. */

static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
  cp_declarator *declarator;

  declarator = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));
  declarator->kind = kind;
  declarator->attributes = NULL_TREE;
  declarator->declarator = NULL;

  return declarator;
}

/* Make a declarator for a generalized identifier. If QUALIFYING_SCOPE is non-NULL, the identifier is QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just UNQUALIFIED_NAME. SFK indicates the kind of special function this is, if any. */

static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
                    special_function_kind sfk)
{
  cp_declarator *declarator;

  /* It is valid to write:

       class C { void f(); };
       typedef C D;
       void D::f();

     The standard is not clear about whether `typedef const C D' is
     legal; as of 2002-09-15 the committee is considering that
     question. EDG 3.0 allows that syntax. Therefore, we do as
     well. */
  if (qualifying_scope && TYPE_P (qualifying_scope))
    qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);

  gcc_assert (TREE_CODE (unqualified_name) == IDENTIFIER_NODE
              || TREE_CODE (unqualified_name) == BIT_NOT_EXPR
              || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);

  declarator = make_declarator (cdk_id);
  declarator->u.id.qualifying_scope = qualifying_scope;
  declarator->u.id.unqualified_name = unqualified_name;
  declarator->u.id.sfk = sfk;

  return declarator;
}

/* Make a declarator for a pointer to TARGET. CV_QUALIFIERS is a list of modifiers such as const or volatile to apply to the pointer type, represented as identifiers.
*/

cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_pointer);
  declarator->declarator = target;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  /* A plain pointer has no class; see make_ptrmem_declarator.  */
  declarator->u.pointer.class_type = NULL_TREE;

  return declarator;
}

/* Like make_pointer_declarator -- but for references. */

cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_reference);
  declarator->declarator = target;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = NULL_TREE;

  return declarator;
}

/* Like make_pointer_declarator -- but for a pointer to a non-static member of CLASS_TYPE. */

cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
                        cp_declarator *pointee)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_ptrmem);
  declarator->declarator = pointee;
  declarator->u.pointer.qualifiers = cv_qualifiers;
  declarator->u.pointer.class_type = class_type;

  return declarator;
}

/* Make a declarator for the function given by TARGET, with the indicated PARMS. The CV_QUALIFIERS apply to the function, as in "const"-qualified member function. The EXCEPTION_SPECIFICATION indicates what exceptions can be thrown. */

cp_declarator *
make_call_declarator (cp_declarator *target,
                      cp_parameter_declarator *parms,
                      cp_cv_quals cv_qualifiers,
                      tree exception_specification)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_function);
  declarator->declarator = target;
  declarator->u.function.parameters = parms;
  declarator->u.function.qualifiers = cv_qualifiers;
  declarator->u.function.exception_specification = exception_specification;

  return declarator;
}

/* Make a declarator for an array of BOUNDS elements, each of which is defined by ELEMENT.
*/

cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *declarator;

  declarator = make_declarator (cdk_array);
  declarator->declarator = element;
  /* BOUNDS may be NULL_TREE for an incomplete array type.  */
  declarator->u.array.bounds = bounds;

  return declarator;
}

/* Shared empty parameter list, used for functions taking no
   parameters.  */
cp_parameter_declarator *no_parameters;

/* Create a parameter declarator with the indicated DECL_SPECIFIERS, DECLARATOR and DEFAULT_ARGUMENT. */

cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
                           cp_declarator *declarator,
                           tree default_argument)
{
  cp_parameter_declarator *parameter;

  parameter = ((cp_parameter_declarator *)
               alloc_declarator (sizeof (cp_parameter_declarator)));
  parameter->next = NULL;
  /* A NULL DECL_SPECIFIERS means "no specifiers at all".  */
  if (decl_specifiers)
    parameter->decl_specifiers = *decl_specifiers;
  else
    clear_decl_specs (&parameter->decl_specifiers);
  parameter->declarator = declarator;
  parameter->default_argument = default_argument;
  parameter->ellipsis_p = false;

  return parameter;
}

/* Returns true iff DECLARATOR is a declaration for a function. */

static bool
function_declarator_p (const cp_declarator *declarator)
{
  /* Walk inward through the declarator nesting; a function declarator
     wrapped directly around an id declarator names a function.  */
  while (declarator)
    {
      if (declarator->kind == cdk_function
          && declarator->declarator->kind == cdk_id)
        return true;
      if (declarator->kind == cdk_id
          || declarator->kind == cdk_error)
        return false;
      declarator = declarator->declarator;
    }
  return false;
}

/* The parser. */

/* Overview
   --------

   A cp_parser parses the token stream as specified by the C++
   grammar. Its job is purely parsing, not semantic analysis. For
   example, the parser breaks the token stream into declarators,
   expressions, statements, and other similar syntactic constructs.
   It does not check that the types of the expressions on either side
   of an assignment-statement are compatible, or that a function is
   not declared with a parameter of type `void'.

   The parser invokes routines elsewhere in the compiler to perform
   semantic analysis and to build up the abstract syntax tree for the
   code processed.
The parser (and the template instantiation code, which is, in a way, a close relative of parsing) are the only parts of the compiler that should be calling push_scope and pop_scope, or related functions. The parser (and template instantiation code) keeps track of what scope is presently active; everything else should simply honor that. (The code that generates static initializers may also need to set the scope, in order to check access control correctly when emitting the initializers.) Methodology ----------- The parser is of the standard recursive-descent variety. Upcoming tokens in the token stream are examined in order to determine which production to use when parsing a non-terminal. Some C++ constructs require arbitrary look ahead to disambiguate. For example, it is impossible, in the general case, to tell whether a statement is an expression or declaration without scanning the entire statement. Therefore, the parser is capable of "parsing tentatively." When the parser is not sure what construct comes next, it enters this mode. Then, while we attempt to parse the construct, the parser queues up error messages, rather than issuing them immediately, and saves the tokens it consumes. If the construct is parsed successfully, the parser "commits", i.e., it issues any queued error messages and the tokens that were being preserved are permanently discarded. If, however, the construct is not parsed successfully, the parser rolls back its state completely so that it can resume parsing using a different alternative. Future Improvements ------------------- The performance of the parser could probably be improved substantially. We could often eliminate the need to parse tentatively by looking ahead a little bit. In some places, this approach might not entirely eliminate the need to parse tentatively, but it might still speed up the average case. */ /* Flags that are passed to some parsing functions. These values can be bitwise-ored together. 
*/ typedef enum cp_parser_flags { /* No flags. */ CP_PARSER_FLAGS_NONE = 0x0, /* The construct is optional. If it is not present, then no error should be issued. */ CP_PARSER_FLAGS_OPTIONAL = 0x1, /* When parsing a type-specifier, do not allow user-defined types. */ CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2 } cp_parser_flags; /* The different kinds of declarators we want to parse. */ typedef enum cp_parser_declarator_kind { /* APPLE LOCAL begin blocks 6339747 */ /* We want a block declarator. */ CP_PARSER_DECLARATOR_BLOCK, /* APPLE LOCAL end blocks 6339747 */ /* We want an abstract declarator. */ CP_PARSER_DECLARATOR_ABSTRACT, /* We want a named declarator. */ CP_PARSER_DECLARATOR_NAMED, /* We don't mind, but the name must be an unqualified-id. */ CP_PARSER_DECLARATOR_EITHER } cp_parser_declarator_kind; /* The precedence values used to parse binary expressions. The minimum value of PREC must be 1, because zero is reserved to quickly discriminate binary operators from other tokens. */ enum cp_parser_prec { PREC_NOT_OPERATOR, PREC_LOGICAL_OR_EXPRESSION, PREC_LOGICAL_AND_EXPRESSION, PREC_INCLUSIVE_OR_EXPRESSION, PREC_EXCLUSIVE_OR_EXPRESSION, PREC_AND_EXPRESSION, PREC_EQUALITY_EXPRESSION, PREC_RELATIONAL_EXPRESSION, PREC_SHIFT_EXPRESSION, PREC_ADDITIVE_EXPRESSION, PREC_MULTIPLICATIVE_EXPRESSION, PREC_PM_EXPRESSION, NUM_PREC_VALUES = PREC_PM_EXPRESSION }; /* A mapping from a token type to a corresponding tree node type, with a precedence value. */ typedef struct cp_parser_binary_operations_map_node { /* The token type. */ enum cpp_ttype token_type; /* The corresponding tree code. */ enum tree_code tree_type; /* The precedence of this operator. */ enum cp_parser_prec prec; } cp_parser_binary_operations_map_node; /* The status of a tentative parse. */ typedef enum cp_parser_status_kind { /* No errors have occurred. */ CP_PARSER_STATUS_KIND_NO_ERROR, /* An error has occurred. 
*/ CP_PARSER_STATUS_KIND_ERROR, /* We are committed to this tentative parse, whether or not an error has occurred. */ CP_PARSER_STATUS_KIND_COMMITTED } cp_parser_status_kind; typedef struct cp_parser_expression_stack_entry { tree lhs; enum tree_code tree_type; int prec; } cp_parser_expression_stack_entry; /* The stack for storing partial expressions. We only need NUM_PREC_VALUES entries because precedence levels on the stack are monotonically increasing. */ typedef struct cp_parser_expression_stack_entry cp_parser_expression_stack[NUM_PREC_VALUES]; /* Context that is saved and restored when parsing tentatively. */ typedef struct cp_parser_context GTY (()) { /* If this is a tentative parsing context, the status of the tentative parse. */ enum cp_parser_status_kind status; /* If non-NULL, we have just seen a `x->' or `x.' expression. Names that are looked up in this context must be looked up both in the scope given by OBJECT_TYPE (the type of `x' or `*x') and also in the context of the containing expression. */ tree object_type; /* The next parsing context in the stack. */ struct cp_parser_context *next; } cp_parser_context; /* Prototypes. */ /* Constructors and destructors. */ static cp_parser_context *cp_parser_context_new (cp_parser_context *); /* Class variables. */ static GTY((deletable)) cp_parser_context* cp_parser_context_free_list; /* The operator-precedence table used by cp_parser_binary_expression. Transformed into an associative array (binops_by_token) by cp_parser_new. 
*/ static const cp_parser_binary_operations_map_node binops[] = { { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION }, { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION }, { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION }, { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION }, { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION }, { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION }, { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION }, { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION }, { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION }, { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION }, { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION }, { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION }, { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION }, { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION }, { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION }, { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION } }; /* The same as binops, but initialized by cp_parser_new so that binops_by_token[N].token_type == N. Used in cp_parser_binary_expression for speed. */ static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES]; /* Constructors and destructors. */ /* Construct a new context. The context below this one on the stack is given by NEXT. */ static cp_parser_context * cp_parser_context_new (cp_parser_context* next) { cp_parser_context *context; /* Allocate the storage. */ if (cp_parser_context_free_list != NULL) { /* Pull the first entry from the free list. */ context = cp_parser_context_free_list; cp_parser_context_free_list = context->next; memset (context, 0, sizeof (*context)); } else context = GGC_CNEW (cp_parser_context); /* No errors have occurred yet in this context. 
*/ context->status = CP_PARSER_STATUS_KIND_NO_ERROR; /* If this is not the bottomost context, copy information that we need from the previous context. */ if (next) { /* If, in the NEXT context, we are parsing an `x->' or `x.' expression, then we are parsing one in this context, too. */ context->object_type = next->object_type; /* Thread the stack. */ context->next = next; } return context; } /* The cp_parser structure represents the C++ parser. */ typedef struct cp_parser GTY(()) { /* The lexer from which we are obtaining tokens. */ cp_lexer *lexer; /* The scope in which names should be looked up. If NULL_TREE, then we look up names in the scope that is currently open in the source program. If non-NULL, this is either a TYPE or NAMESPACE_DECL for the scope in which we should look. It can also be ERROR_MARK, when we've parsed a bogus scope. This value is not cleared automatically after a name is looked up, so we must be careful to clear it before starting a new look up sequence. (If it is not cleared, then `X::Y' followed by `Z' will look up `Z' in the scope of `X', rather than the current scope.) Unfortunately, it is difficult to tell when name lookup is complete, because we sometimes peek at a token, look it up, and then decide not to consume it. */ tree scope; /* OBJECT_SCOPE and QUALIFYING_SCOPE give the scopes in which the last lookup took place. OBJECT_SCOPE is used if an expression like "x->y" or "x.y" was used; it gives the type of "*x" or "x", respectively. QUALIFYING_SCOPE is used for an expression of the form "X::Y"; it refers to X. */ tree object_scope; tree qualifying_scope; /* A stack of parsing contexts. All but the bottom entry on the stack will be tentative contexts. We parse tentatively in order to determine which construct is in use in some situations. For example, in order to determine whether a statement is an expression-statement or a declaration-statement we parse it tentatively as a declaration-statement. 
If that fails, we then reparse the same token stream as an expression-statement. */ cp_parser_context *context; /* True if we are parsing GNU C++. If this flag is not set, then GNU extensions are not recognized. */ bool allow_gnu_extensions_p; /* TRUE if the `>' token should be interpreted as the greater-than operator. FALSE if it is the end of a template-id or template-parameter-list. */ bool greater_than_is_operator_p; /* TRUE if default arguments are allowed within a parameter list that starts at this point. FALSE if only a gnu extension makes them permissible. */ bool default_arg_ok_p; /* TRUE if we are parsing an integral constant-expression. See [expr.const] for a precise definition. */ bool integral_constant_expression_p; /* TRUE if we are parsing an integral constant-expression -- but a non-constant expression should be permitted as well. This flag is used when parsing an array bound so that GNU variable-length arrays are tolerated. */ bool allow_non_integral_constant_expression_p; /* TRUE if ALLOW_NON_CONSTANT_EXPRESSION_P is TRUE and something has been seen that makes the expression non-constant. */ bool non_integral_constant_expression_p; /* TRUE if local variable names and `this' are forbidden in the current context. */ bool local_variables_forbidden_p; /* TRUE if the declaration we are parsing is part of a linkage-specification of the form `extern string-literal declaration'. */ bool in_unbraced_linkage_specification_p; /* TRUE if we are presently parsing a declarator, after the direct-declarator. */ bool in_declarator_p; /* TRUE if we are presently parsing a template-argument-list. */ bool in_template_argument_list_p; /* Set to IN_ITERATION_STMT if parsing an iteration-statement, to IN_OMP_BLOCK if parsing OpenMP structured block and IN_OMP_FOR if parsing OpenMP loop. If parsing a switch statement, this is bitwise ORed with IN_SWITCH_STMT, unless parsing an iteration-statement, OpenMP block or loop within that switch. 
*/ #define IN_SWITCH_STMT 1 #define IN_ITERATION_STMT 2 #define IN_OMP_BLOCK 4 #define IN_OMP_FOR 8 unsigned char in_statement; /* TRUE if we are presently parsing the body of a switch statement. Note that this doesn't quite overlap with in_statement above. The difference relates to giving the right sets of error messages: "case not in switch" vs "break statement used with OpenMP...". */ bool in_switch_statement_p; /* TRUE if we are parsing a type-id in an expression context. In such a situation, both "type (expr)" and "type (type)" are valid alternatives. */ bool in_type_id_in_expr_p; /* TRUE if we are currently in a header file where declarations are implicitly extern "C". */ bool implicit_extern_c; /* TRUE if strings in expressions should be translated to the execution character set. */ bool translate_strings_p; /* TRUE if we are presently parsing the body of a function, but not a local class. */ bool in_function_body; /* If non-NULL, then we are parsing a construct where new type definitions are not permitted. The string stored here will be issued as an error message if a type is defined. */ const char *type_definition_forbidden_message; /* A list of lists. The outer list is a stack, used for member functions of local classes. At each level there are two sub-list, one on TREE_VALUE and one on TREE_PURPOSE. Each of those sub-lists has a FUNCTION_DECL or TEMPLATE_DECL on their TREE_VALUE's. The functions are chained in reverse declaration order. The TREE_PURPOSE sublist contains those functions with default arguments that need post processing, and the TREE_VALUE sublist contains those functions with definitions that need post processing. These lists can only be processed once the outermost class being defined is complete. */ tree unparsed_functions_queues; /* The number of classes whose definitions are currently in progress. */ unsigned num_classes_being_defined; /* The number of template parameter lists that apply directly to the current declaration. 
*/ unsigned num_template_parameter_lists; } cp_parser; /* Prototypes. */ /* Constructors and destructors. */ static cp_parser *cp_parser_new (void); /* Routines to parse various constructs. Those that return `tree' will return the error_mark_node (rather than NULL_TREE) if a parse error occurs, unless otherwise noted. Sometimes, they will return an ordinary node if error-recovery was attempted, even though a parse error occurred. So, to check whether or not a parse error occurred, you should always use cp_parser_error_occurred. If the construct is optional (indicated either by an `_opt' in the name of the function that does the parsing or via a FLAGS parameter), then NULL_TREE is returned if the construct is not present. */ /* Lexical conventions [gram.lex] */ static tree cp_parser_identifier (cp_parser *); static tree cp_parser_string_literal (cp_parser *, bool, bool); /* Basic concepts [gram.basic] */ static bool cp_parser_translation_unit (cp_parser *); /* Expressions [gram.expr] */ static tree cp_parser_primary_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_id_expression (cp_parser *, bool, bool, bool *, bool, bool); static tree cp_parser_unqualified_id (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier_opt (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier (cp_parser *, bool, bool, bool, bool); static tree cp_parser_class_or_namespace_name (cp_parser *, bool, bool, bool, bool, bool); static tree cp_parser_postfix_expression (cp_parser *, bool, bool); static tree cp_parser_postfix_open_square_expression (cp_parser *, tree, bool); static tree cp_parser_postfix_dot_deref_expression (cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *); static tree cp_parser_parenthesized_expression_list (cp_parser *, bool, bool, bool *); static void cp_parser_pseudo_destructor_name (cp_parser *, tree *, tree *); static tree cp_parser_unary_expression (cp_parser *, bool, bool); 
static enum tree_code cp_parser_unary_operator (cp_token *); static tree cp_parser_new_expression (cp_parser *); static tree cp_parser_new_placement (cp_parser *); static tree cp_parser_new_type_id (cp_parser *, tree *); static cp_declarator *cp_parser_new_declarator_opt (cp_parser *); static cp_declarator *cp_parser_direct_new_declarator (cp_parser *); static tree cp_parser_new_initializer (cp_parser *); static tree cp_parser_delete_expression (cp_parser *); static tree cp_parser_cast_expression (cp_parser *, bool, bool); static tree cp_parser_binary_expression (cp_parser *, bool); static tree cp_parser_question_colon_clause (cp_parser *, tree); static tree cp_parser_assignment_expression (cp_parser *, bool); static enum tree_code cp_parser_assignment_operator_opt (cp_parser *); static tree cp_parser_expression (cp_parser *, bool); static tree cp_parser_constant_expression (cp_parser *, bool, bool *); static tree cp_parser_builtin_offsetof (cp_parser *); /* APPLE LOCAL begin blocks 6040305 (ca) */ static tree cp_parser_block_literal_expr (cp_parser *); /* APPLE LOCAL end blocks 6040305 (ca) */ /* APPLE LOCAL begin C* language */ static void objc_foreach_stmt (cp_parser *, tree); /* APPLE LOCAL end C* language */ /* APPLE LOCAL begin C* property (Radar 4436866) */ static void objc_cp_parser_at_property (cp_parser *); static void objc_cp_parse_property_decl (cp_parser *); /* APPLE LOCAL end C* property (Radar 4436866) */ /* APPLE LOCAL begin objc new property */ static void objc_cp_parser_property_impl (cp_parser *parser, enum rid keyword); /* APPLE LOCAL end objc new property */ /* APPLE LOCAL begin radar 4548636 */ static bool objc_attr_follwed_by_at_keyword (cp_parser *); /* APPLE LOCAL end radar 4548636 */ /* Statements [gram.stmt.stmt] */ static void cp_parser_statement (cp_parser *, tree, bool); static void cp_parser_label_for_labeled_statement (cp_parser *); static tree cp_parser_expression_statement (cp_parser *, tree); static tree 
cp_parser_compound_statement /* APPLE LOCAL radar 5982990 */ (cp_parser *, tree, bool, bool); static void cp_parser_statement_seq_opt (cp_parser *, tree); static tree cp_parser_selection_statement (cp_parser *); static tree cp_parser_condition (cp_parser *); static tree cp_parser_iteration_statement (cp_parser *); static void cp_parser_for_init_statement (cp_parser *); static tree cp_parser_jump_statement (cp_parser *); static void cp_parser_declaration_statement (cp_parser *); static tree cp_parser_implicitly_scoped_statement (cp_parser *); static void cp_parser_already_scoped_statement (cp_parser *); /* Declarations [gram.dcl.dcl] */ static void cp_parser_declaration_seq_opt (cp_parser *); static void cp_parser_declaration (cp_parser *); static void cp_parser_block_declaration (cp_parser *, bool); static void cp_parser_simple_declaration (cp_parser *, bool); static void cp_parser_decl_specifier_seq (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *); static tree cp_parser_storage_class_specifier_opt (cp_parser *); static tree cp_parser_function_specifier_opt (cp_parser *, cp_decl_specifier_seq *); static tree cp_parser_type_specifier (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool, int *, bool *); static tree cp_parser_simple_type_specifier (cp_parser *, cp_decl_specifier_seq *, cp_parser_flags); static tree cp_parser_type_name (cp_parser *); static tree cp_parser_elaborated_type_specifier (cp_parser *, bool, bool); static tree cp_parser_enum_specifier (cp_parser *); static void cp_parser_enumerator_list (cp_parser *, tree); static void cp_parser_enumerator_definition (cp_parser *, tree); static tree cp_parser_namespace_name (cp_parser *); static void cp_parser_namespace_definition (cp_parser *); static void cp_parser_namespace_body (cp_parser *); static tree cp_parser_qualified_namespace_specifier (cp_parser *); static void cp_parser_namespace_alias_definition (cp_parser *); static bool cp_parser_using_declaration (cp_parser *, bool); 
static void cp_parser_using_directive (cp_parser *); static void cp_parser_asm_definition /* APPLE LOCAL CW asm blocks */ (cp_parser *, bool); static void cp_parser_linkage_specification (cp_parser *); /* Declarators [gram.dcl.decl] */ static tree cp_parser_init_declarator (cp_parser *, cp_decl_specifier_seq *, VEC (deferred_access_check,gc)*, bool, bool, int, bool *); static cp_declarator *cp_parser_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool *, bool); static cp_declarator *cp_parser_direct_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool); static enum tree_code cp_parser_ptr_operator (cp_parser *, tree *, cp_cv_quals *); static cp_cv_quals cp_parser_cv_qualifier_seq_opt (cp_parser *); static tree cp_parser_declarator_id (cp_parser *, bool); static tree cp_parser_type_id (cp_parser *); static void cp_parser_type_specifier_seq (cp_parser *, bool, cp_decl_specifier_seq *); static cp_parameter_declarator *cp_parser_parameter_declaration_clause (cp_parser *); static cp_parameter_declarator *cp_parser_parameter_declaration_list (cp_parser *, bool *); static cp_parameter_declarator *cp_parser_parameter_declaration (cp_parser *, bool, bool *); static void cp_parser_function_body (cp_parser *); static tree cp_parser_initializer (cp_parser *, bool *, bool *); static tree cp_parser_initializer_clause (cp_parser *, bool *); static VEC(constructor_elt,gc) *cp_parser_initializer_list (cp_parser *, bool *); static bool cp_parser_ctor_initializer_opt_and_function_body (cp_parser *); /* Classes [gram.class] */ static tree cp_parser_class_name (cp_parser *, bool, bool, enum tag_types, bool, bool, bool); static tree cp_parser_class_specifier (cp_parser *); static tree cp_parser_class_head (cp_parser *, bool *, tree *, tree *); static enum tag_types cp_parser_class_key (cp_parser *); static void cp_parser_member_specification_opt (cp_parser *); static void cp_parser_member_declaration (cp_parser *); static tree cp_parser_pure_specifier (cp_parser 
*); static tree cp_parser_constant_initializer (cp_parser *); /* Derived classes [gram.class.derived] */ static tree cp_parser_base_clause (cp_parser *); static tree cp_parser_base_specifier (cp_parser *); /* Special member functions [gram.special] */ static tree cp_parser_conversion_function_id (cp_parser *); static tree cp_parser_conversion_type_id (cp_parser *); static cp_declarator *cp_parser_conversion_declarator_opt (cp_parser *); static bool cp_parser_ctor_initializer_opt (cp_parser *); static void cp_parser_mem_initializer_list (cp_parser *); static tree cp_parser_mem_initializer (cp_parser *); static tree cp_parser_mem_initializer_id (cp_parser *); /* Overloading [gram.over] */ static tree cp_parser_operator_function_id (cp_parser *); static tree cp_parser_operator (cp_parser *); /* Templates [gram.temp] */ static void cp_parser_template_declaration (cp_parser *, bool); static tree cp_parser_template_parameter_list (cp_parser *); static tree cp_parser_template_parameter (cp_parser *, bool *); static tree cp_parser_type_parameter (cp_parser *); static tree cp_parser_template_id (cp_parser *, bool, bool, bool); static tree cp_parser_template_name (cp_parser *, bool, bool, bool, bool *); static tree cp_parser_template_argument_list (cp_parser *); static tree cp_parser_template_argument (cp_parser *); static void cp_parser_explicit_instantiation (cp_parser *); static void cp_parser_explicit_specialization (cp_parser *); /* Exception handling [gram.exception] */ static tree cp_parser_try_block (cp_parser *); static bool cp_parser_function_try_block (cp_parser *); static void cp_parser_handler_seq (cp_parser *); static void cp_parser_handler (cp_parser *); static tree cp_parser_exception_declaration (cp_parser *); static tree cp_parser_throw_expression (cp_parser *); static tree cp_parser_exception_specification_opt (cp_parser *); static tree cp_parser_type_id_list (cp_parser *); /* GNU Extensions */ static tree cp_parser_asm_specification_opt (cp_parser *); 
static tree cp_parser_asm_operand_list (cp_parser *); static tree cp_parser_asm_clobber_list (cp_parser *); static tree cp_parser_attributes_opt (cp_parser *); static tree cp_parser_attribute_list (cp_parser *); static bool cp_parser_extension_opt (cp_parser *, int *); static void cp_parser_label_declaration (cp_parser *); enum pragma_context { pragma_external, pragma_stmt, pragma_compound }; static bool cp_parser_pragma (cp_parser *, enum pragma_context); /* Objective-C++ Productions */ static tree cp_parser_objc_message_receiver (cp_parser *); static tree cp_parser_objc_message_args (cp_parser *); static tree cp_parser_objc_message_expression (cp_parser *); /* APPLE LOCAL begin radar 5277239 */ static tree cp_parser_objc_reference_expression (cp_parser *, tree); /* APPLE LOCAL end radar 5277239 */ static tree cp_parser_objc_encode_expression (cp_parser *); static tree cp_parser_objc_defs_expression (cp_parser *); static tree cp_parser_objc_protocol_expression (cp_parser *); static tree cp_parser_objc_selector_expression (cp_parser *); static tree cp_parser_objc_expression (cp_parser *); static bool cp_parser_objc_selector_p (enum cpp_ttype); static tree cp_parser_objc_selector (cp_parser *); /* APPLE LOCAL begin radar 3803157 - objc attribute */ static void cp_parser_objc_maybe_attributes (cp_parser *, tree *); static tree cp_parser_objc_identifier_list (cp_parser *); /* APPLE LOCAL end radar 3803157 - objc attribute */ static tree cp_parser_objc_protocol_refs_opt (cp_parser *); /* APPLE LOCAL begin radar 5355344 */ static bool cp_parser_objc_tentative_protocol_refs_opt (cp_parser *, tree *); /* APPLE LOCAL end radar 5355344 */ static void cp_parser_objc_declaration (cp_parser *); static tree cp_parser_objc_statement (cp_parser *); /* Utility Routines */ static tree cp_parser_lookup_name (cp_parser *, tree, enum tag_types, bool, bool, bool, tree *); static tree cp_parser_lookup_name_simple (cp_parser *, tree); static tree cp_parser_maybe_treat_template_as_class 
(tree, bool); static bool cp_parser_check_declarator_template_parameters (cp_parser *, cp_declarator *); static bool cp_parser_check_template_parameters (cp_parser *, unsigned); static tree cp_parser_simple_cast_expression (cp_parser *); static tree cp_parser_global_scope_opt (cp_parser *, bool); static bool cp_parser_constructor_declarator_p (cp_parser *, bool); static tree cp_parser_function_definition_from_specifiers_and_declarator (cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *); static tree cp_parser_function_definition_after_declarator (cp_parser *, bool); static void cp_parser_template_declaration_after_export (cp_parser *, bool); static void cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)*); static tree cp_parser_single_declaration (cp_parser *, VEC (deferred_access_check,gc)*, bool, bool *); static tree cp_parser_functional_cast (cp_parser *, tree); static tree cp_parser_save_member_function_body (cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree); static tree cp_parser_enclosed_template_argument_list (cp_parser *); static void cp_parser_save_default_args (cp_parser *, tree); static void cp_parser_late_parsing_for_member (cp_parser *, tree); static void cp_parser_late_parsing_default_args (cp_parser *, tree); static tree cp_parser_sizeof_operand (cp_parser *, enum rid); static bool cp_parser_declares_only_class_p (cp_parser *); static void cp_parser_set_storage_class (cp_parser *, cp_decl_specifier_seq *, enum rid); static void cp_parser_set_decl_spec_type (cp_decl_specifier_seq *, tree, bool); static bool cp_parser_friend_p (const cp_decl_specifier_seq *); static cp_token *cp_parser_require (cp_parser *, enum cpp_ttype, const char *); static cp_token *cp_parser_require_keyword (cp_parser *, enum rid, const char *); static bool cp_parser_token_starts_function_definition_p (cp_token *); static bool cp_parser_next_token_starts_class_definition_p (cp_parser *); static bool 
cp_parser_next_token_ends_template_argument_p (cp_parser *); static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser *, size_t); static enum tag_types cp_parser_token_is_class_key (cp_token *); static void cp_parser_check_class_key (enum tag_types, tree type); static void cp_parser_check_access_in_redeclaration (tree type); static bool cp_parser_optional_template_keyword (cp_parser *); static void cp_parser_pre_parsed_nested_name_specifier (cp_parser *); static void cp_parser_cache_group (cp_parser *, enum cpp_ttype, unsigned); static void cp_parser_parse_tentatively (cp_parser *); static void cp_parser_commit_to_tentative_parse (cp_parser *); static void cp_parser_abort_tentative_parse (cp_parser *); static bool cp_parser_parse_definitely (cp_parser *); static inline bool cp_parser_parsing_tentatively (cp_parser *); static bool cp_parser_uncommitted_to_tentative_parse_p (cp_parser *); static void cp_parser_error (cp_parser *, const char *); static void cp_parser_name_lookup_error (cp_parser *, tree, tree, const char *); static bool cp_parser_simulate_error (cp_parser *); static bool cp_parser_check_type_definition (cp_parser *); static void cp_parser_check_for_definition_in_return_type (cp_declarator *, tree); static void cp_parser_check_for_invalid_template_id (cp_parser *, tree); static bool cp_parser_non_integral_constant_expression (cp_parser *, const char *); static void cp_parser_diagnose_invalid_type_name (cp_parser *, tree, tree); static bool cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *); static int cp_parser_skip_to_closing_parenthesis (cp_parser *, bool, bool, bool); static void cp_parser_skip_to_end_of_statement (cp_parser *); static void cp_parser_consume_semicolon_at_end_of_statement (cp_parser *); static void cp_parser_skip_to_end_of_block_or_statement (cp_parser *); static void cp_parser_skip_to_closing_brace (cp_parser *); static void cp_parser_skip_to_end_of_template_parameter_list (cp_parser *); static void 
cp_parser_skip_to_pragma_eol
  (cp_parser*, cp_token *);
static bool cp_parser_error_occurred
  (cp_parser *);
static bool cp_parser_allow_gnu_extensions_p
  (cp_parser *);
static bool cp_parser_is_string_literal
  (cp_token *);
static bool cp_parser_is_keyword
  (cp_token *, enum rid);
static tree cp_parser_make_typename_type
  (cp_parser *, tree, tree);

/* APPLE LOCAL begin CW asm blocks */
static tree cp_parser_iasm_compound_statement
  (cp_parser *);
static void cp_parser_iasm_declaration_seq_opt
  (cp_parser *);
static void cp_parser_iasm_line_seq_opt
  (cp_parser *);
static void cp_parser_iasm_line
  (cp_parser *);
static void cp_parser_iasm_statement_seq_opt
  (cp_parser *);
static void cp_parser_iasm_statement
  (cp_parser *);
static tree cp_parser_iasm_operands
  (cp_parser *);
static tree cp_parser_iasm_operand
  (cp_parser *);
static tree cp_parser_iasm_postfix_expression
  (cp_parser *, bool, bool);
static tree cp_parser_iasm_identifier_or_number
  (cp_parser* parser);
static tree iasm_build_identifier_string
  (cp_parser* parser, const char* str);
static tree cp_parser_iasm_relative_branch
  (cp_parser *parser);
static void cp_parser_iasm_top_statement
  (cp_parser *parser);
/* Hook for target-specific opcode recognition in CW-style asm blocks;
   the default is the identity mapping.  */
#ifndef IASM_SEE_OPCODE
#define IASM_SEE_OPCODE(YYCHAR, T) YYCHAR
#endif
/* Token classifications used by the CW asm parser below.  */
#define TYPESPEC 1
#define IDENTIFIER 2
/* APPLE LOCAL end CW asm blocks */

/* Returns nonzero if we are parsing tentatively.  */

static inline bool
cp_parser_parsing_tentatively (cp_parser* parser)
{
  /* A context with a non-NULL `next' link means at least one saved
     (i.e. tentative) parser context is on the stack.  */
  return parser->context->next != NULL;
}

/* Returns nonzero if TOKEN is a string literal.  */

static bool
cp_parser_is_string_literal (cp_token* token)
{
  /* Both narrow and wide string literals qualify.  */
  return (token->type == CPP_STRING || token->type == CPP_WSTRING);
}

/* Returns nonzero if TOKEN is the indicated KEYWORD.  */

static bool
cp_parser_is_keyword (cp_token* token, enum rid keyword)
{
  return token->keyword == keyword;
}

/* If not parsing tentatively, issue a diagnostic of the form

      FILE:LINE: MESSAGE before TOKEN

   where TOKEN is the next token in the input stream.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".  */

static void
cp_parser_error (cp_parser* parser, const char* message)
{
  /* While parsing tentatively, errors are merely recorded, not
     issued; cp_parser_simulate_error returns true in that case.  */
  if (!cp_parser_simulate_error (parser))
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      /* This diagnostic makes more sense if it is tagged to the line
	 of the token we just peeked at.  */
      cp_lexer_set_source_position_from_token (token);
      if (token->type == CPP_PRAGMA)
	{
	  error ("%<#pragma%> is not allowed here");
	  cp_parser_skip_to_pragma_eol (parser, token);
	  return;
	}
      c_parse_error (message,
		     /* Because c_parser_error does not understand
			CPP_KEYWORD, keywords are treated like
			identifiers.  */
		     (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		     token->u.value);
    }
}

/* Issue an error about name-lookup failing.  NAME is the
   IDENTIFIER_NODE.  DECL is the result of the lookup (as returned
   from cp_parser_lookup_name).  DESIRED is the thing that we hoped to
   find.  */

static void
cp_parser_name_lookup_error (cp_parser* parser,
			     tree name,
			     tree decl,
			     const char* desired)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.  */
  if (decl == error_mark_node)
    {
      if (parser->scope && parser->scope != global_namespace)
	error ("%<%D::%D%> has not been declared",
	       parser->scope, name);
      else if (parser->scope == global_namespace)
	error ("%<::%D%> has not been declared", name);
      else if (parser->object_scope
	       && !CLASS_TYPE_P (parser->object_scope))
	error ("request for member %qD in non-class type %qT",
	       name, parser->object_scope);
      else if (parser->object_scope)
	error ("%<%T::%D%> has not been declared",
	       parser->object_scope, name);
      else
	error ("%qD has not been declared", name);
    }
  /* Otherwise the lookup found something, but not the DESIRED kind of
     entity; qualify the message with the scope where applicable.  */
  else if (parser->scope && parser->scope != global_namespace)
    error ("%<%D::%D%> %s", parser->scope, name, desired);
  else if (parser->scope == global_namespace)
    error ("%<::%D%> %s", name, desired);
  else
    error ("%qD %s", name, desired);
}

/* If we are parsing tentatively, remember that an error has occurred
   during this tentative parse.  Returns true if the error was
   simulated; false if a message should be issued by the caller.  */

static bool
cp_parser_simulate_error (cp_parser* parser)
{
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      parser->context->status = CP_PARSER_STATUS_KIND_ERROR;
      return true;
    }
  return false;
}

/* Check for repeated decl-specifiers.  */

static void
cp_parser_check_decl_spec (cp_decl_specifier_seq *decl_specs)
{
  cp_decl_spec ds;

  for (ds = ds_first; ds != ds_last; ++ds)
    {
      unsigned count = decl_specs->specs[(int)ds];
      if (count < 2)
	continue;
      /* The "long" specifier is a special case because of "long
	 long".  */
      if (ds == ds_long)
	{
	  if (count > 2)
	    error ("%<long long long%> is too long for GCC");
	  else if (pedantic && !in_system_header && warn_long_long)
	    pedwarn ("ISO C++ does not support %<long long%>");
	}
      else if (count > 1)
	{
	  /* NOTE: this table must stay in the same order as the
	     cp_decl_spec enumeration, since it is indexed by DS.  */
	  static const char *const decl_spec_names[] = {
	    "signed",
	    "unsigned",
	    "short",
	    "long",
	    "const",
	    "volatile",
	    "restrict",
	    "inline",
	    "virtual",
	    "explicit",
	    "friend",
	    "typedef",
	    "__complex",
	    "__thread"
	    /* APPLE LOCAL CW asm blocks */
	    , "asm"
	  };
	  error ("duplicate %qs", decl_spec_names[(int)ds]);
	}
    }
}

/* This function is called when a type is defined.  If type
   definitions are forbidden at this point, an error message is
   issued.  Returns false on error, true otherwise.  */

static bool
cp_parser_check_type_definition (cp_parser* parser)
{
  /* If types are forbidden here, issue a message.  */
  if (parser->type_definition_forbidden_message)
    {
      /* Use `%s' to print the string in case there are any escape
	 characters in the message.  */
      error ("%s", parser->type_definition_forbidden_message);
      return false;
    }
  return true;
}

/* This function is called when the DECLARATOR is processed.  The TYPE
   was a type defined in the decl-specifiers.  If it is invalid to
   define a type in the decl-specifiers for DECLARATOR, an error is
   issued.  */

static void
cp_parser_check_for_definition_in_return_type (cp_declarator *declarator,
					       tree type)
{
  /* [dcl.fct] forbids type definitions in return types.
     Unfortunately, it's not easy to know whether or not we are
     processing a return type until after the fact.  */
  /* Strip pointer/reference/pointer-to-member declarators to reach
     the innermost declarator.  */
  while (declarator
	 && (declarator->kind == cdk_pointer
	     || declarator->kind == cdk_reference
	     || declarator->kind == cdk_ptrmem))
    declarator = declarator->declarator;
  if (declarator
      && declarator->kind == cdk_function)
    {
      error ("new types may not be defined in a return type");
      inform ("(perhaps a semicolon is missing after the definition of %qT)",
	      type);
    }
}

/* A type-specifier (TYPE) has been parsed which cannot be followed by
   "<" in any valid C++ program.
   If the next token is indeed "<", issue a message warning the user
   about what appears to be an invalid attempt to form a template-id.  */

static void
cp_parser_check_for_invalid_template_id (cp_parser* parser,
					 tree type)
{
  cp_token_position start = 0;

  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      if (TYPE_P (type))
	error ("%qT is not a template", type);
      else if (TREE_CODE (type) == IDENTIFIER_NODE)
	error ("%qE is not a template", type);
      else
	error ("invalid template-id");
      /* Remember the location of the invalid "<".  */
      if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	start = cp_lexer_token_position (parser->lexer, true);
      /* Consume the "<".  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template arguments.  */
      cp_parser_enclosed_template_argument_list (parser);
      /* Permanently remove the invalid template arguments so that
	 this error message is not issued again.  */
      if (start)
	cp_lexer_purge_tokens_after (parser->lexer, start);
    }
}

/* If parsing an integral constant-expression, issue an error message
   about the fact that THING appeared and return true.  Otherwise,
   return false.  In either case, set
   PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P.  */

static bool
cp_parser_non_integral_constant_expression (cp_parser *parser,
					    const char *thing)
{
  parser->non_integral_constant_expression_p = true;
  if (parser->integral_constant_expression_p)
    {
      if (!parser->allow_non_integral_constant_expression_p)
	{
	  error ("%s cannot appear in a constant-expression", thing);
	  return true;
	}
    }
  return false;
}

/* Emit a diagnostic for an invalid type name.  SCOPE is the
   qualifying scope (or NULL, if none) for ID.  This function commits
   to the current active tentative parse, if any.  (Otherwise, the
   problematic construct might be encountered again later, resulting
   in duplicate error messages.)  */

static void
cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree scope, tree id)
{
  tree decl, old_scope;
  /* Try to lookup the identifier.  The lookup is done in SCOPE, so
     temporarily install it as the parser's scope.  */
  old_scope = parser->scope;
  parser->scope = scope;
  decl = cp_parser_lookup_name_simple (parser, id);
  parser->scope = old_scope;
  /* If the lookup found a template-name, it means that the user forgot
  to specify an argument list. Emit a useful error message.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    error ("invalid use of template-name %qE without an argument list",
      decl);
  else if (TREE_CODE (id) == BIT_NOT_EXPR)
    error ("invalid use of destructor %qD as a type", id);
  else if (TREE_CODE (decl) == TYPE_DECL)
    /* Something like 'unsigned A a;'  */
    error ("invalid combination of multiple type-specifiers");
  else if (!parser->scope)
    {
      /* Issue an error message.  */
      error ("%qE does not name a type", id);
      /* If we're in a template class, it's possible that the user was
	 referring to a type from a base class.  For example:

	   template <typename T> struct A { typedef T X; };
	   template <typename T> struct B : public A<T> { X x; };

	 The user should have said "typename A<T>::X".  */
      if (processing_template_decl && current_class_type
	  && TYPE_BINFO (current_class_type))
	{
	  tree b;

	  for (b = TREE_CHAIN (TYPE_BINFO (current_class_type));
	       b;
	       b = TREE_CHAIN (b))
	    {
	      tree base_type = BINFO_TYPE (b);
	      if (CLASS_TYPE_P (base_type)
		  && dependent_type_p (base_type))
		{
		  tree field;
		  /* Go from a particular instantiation of the
		     template (which will have an empty TYPE_FIELDs),
		     to the main version.  */
		  base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type);
		  for (field = TYPE_FIELDS (base_type);
		       field;
		       field = TREE_CHAIN (field))
		    if (TREE_CODE (field) == TYPE_DECL
			&& DECL_NAME (field) == id)
		      {
			inform ("(perhaps %<typename %T::%E%> was intended)",
				BINFO_TYPE (b), id);
			break;
		      }
		  /* A matching member was found; no need to look at
		     further base classes.  */
		  if (field)
		    break;
		}
	    }
	}
    }
  /* Here we diagnose qualified-ids where the scope is actually correct,
     but the identifier does not resolve to a valid type name.  */
  else if (parser->scope != error_mark_node)
    {
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
	error ("%qE in namespace %qE does not name a type",
	       id, parser->scope);
      else if (TYPE_P (parser->scope))
	error ("%qE in class %qT does not name a type", id, parser->scope);
      else
	gcc_unreachable ();
    }
  cp_parser_commit_to_tentative_parse (parser);
}

/* Check for a common situation where a type-name should be present,
   but is not, and issue a sensible error message.  Returns true if an
   invalid type-name was detected.

   The situation handled by this function are variable declarations of the
   form `ID a', where `ID' is an id-expression and `a' is a plain identifier.
   Usually, `ID' should name a type, but if we got here it means that it
   does not. We try to emit the best possible error message depending on
   how exactly the id-expression looks like.  */

static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;

  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
				/*template_keyword_p=*/false,
				/*check_dependency_p=*/true,
				/*template_p=*/NULL,
				/*declarator_p=*/true,
				/*optional_p=*/false);
  /* After the id-expression, there should be a plain identifier,
     otherwise this is not a simple variable declaration. Also, if
     the scope is dependent, we cannot do much.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_NAME)
      || (parser->scope && TYPE_P (parser->scope)
	  && dependent_type_p (parser->scope))
      || TREE_CODE (id) == TYPE_DECL)
    {
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  if (!cp_parser_parse_definitely (parser))
    return false;

  /* Emit a diagnostic for the invalid type.  */
  cp_parser_diagnose_invalid_type_name (parser, parser->scope, id);
  /* Skip to the end of the declaration; there's no point in
     trying to process it.  */
  cp_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}

/* Consume tokens up to, and including, the next non-nested closing `)'.
   Returns 1 iff we found a closing `)'.
RECOVERING is true, if we are doing error recovery. Returns -1 if OR_COMMA is true and we found an unnested comma. */ static int cp_parser_skip_to_closing_parenthesis (cp_parser *parser, bool recovering, bool or_comma, bool consume_paren) { unsigned paren_depth = 0; unsigned brace_depth = 0; if (recovering && !or_comma && cp_parser_uncommitted_to_tentative_parse_p (parser)) return 0; while (true) { cp_token * token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, then there is no closing `)'. */ return 0; case CPP_SEMICOLON: /* This matches the processing in skip_to_end_of_statement. */ if (!brace_depth) return 0; break; case CPP_OPEN_BRACE: ++brace_depth; break; case CPP_CLOSE_BRACE: if (!brace_depth--) return 0; break; case CPP_COMMA: if (recovering && or_comma && !brace_depth && !paren_depth) return -1; break; case CPP_OPEN_PAREN: if (!brace_depth) ++paren_depth; break; case CPP_CLOSE_PAREN: if (!brace_depth && !paren_depth--) { if (consume_paren) cp_lexer_consume_token (parser->lexer); return 1; } break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Consume tokens until we reach the end of the current statement. Normally, that will be just before consuming a `;'. However, if a non-nested `}' comes first, then we stop before consuming that. */ static void cp_parser_skip_to_end_of_statement (cp_parser* parser) { unsigned nesting_depth = 0; while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, stop. */ return; case CPP_SEMICOLON: /* If the next token is a `;', we have reached the end of the statement. */ if (!nesting_depth) return; break; case CPP_CLOSE_BRACE: /* If this is a non-nested '}', stop before consuming it. 
That way, when confronted with something like: { 3 + } we stop before consuming the closing '}', even though we have not yet reached a `;'. */ if (nesting_depth == 0) return; /* If it is the closing '}' for a block that we have scanned, stop -- but only after consuming the token. That way given: void f g () { ... } typedef int I; we will stop after the body of the erroneously declared function, but before consuming the following `typedef' declaration. */ if (--nesting_depth == 0) { cp_lexer_consume_token (parser->lexer); return; } case CPP_OPEN_BRACE: ++nesting_depth; break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* APPLE LOCAL begin radar 5277239 */ /* This routine checks that type_decl is a class or class object followed by a '.' which is an alternative syntax to class-method messaging [class-name class-method] */ static bool cp_objc_property_reference_prefix (cp_parser *parser, tree type) { return c_dialect_objc () && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT && (objc_is_id (type) || objc_is_class_name (type)); } /* APPLE LOCAL end radar 5277239 */ /* APPLE LOCAL begin C* property (Radar 4436866, 4591909) */ /* This routine parses the propery declarations. */ static void objc_cp_parse_property_decl (cp_parser *parser) { int declares_class_or_enum; cp_decl_specifier_seq declspecs; cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_NONE, &declspecs, &declares_class_or_enum); /* Keep going until we hit the `;' at the end of the declaration. */ while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { tree property; cp_token *token; cp_declarator *declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, NULL, NULL, false); property = grokdeclarator (declarator, &declspecs, NORMAL,0, NULL); /* Revover from any kind of error in property declaration. 
*/ if (property == error_mark_node || property == NULL_TREE) return; /* APPLE LOCAL begin objc new property */ if (declspecs.attributes) cplus_decl_attributes (&property, declspecs.attributes, 0); /* APPLE LOCAL begin radar 4712415 */ token = cp_lexer_peek_token (parser->lexer); if (token->keyword == RID_ATTRIBUTE) { /* Attribute on the property itself. */ declspecs.attributes = cp_parser_attributes_opt (parser); cplus_decl_attributes (&property, declspecs.attributes, 0); token = cp_lexer_peek_token (parser->lexer); } /* APPLE LOCAL end radar 4712415 */ /* APPLE LOCAL end objc new property */ /* Add to property list. */ objc_add_property_variable (copy_node (property)); if (token->type == CPP_COMMA) { cp_lexer_consume_token (parser->lexer); /* Eat ','. */ continue; } else if (token->type == CPP_EOF) return; } cp_lexer_consume_token (parser->lexer); /* Eat ';'. */ } /* APPLE LOCAL begin objc new property */ /* This routine parses @synthesize property_name[=ivar],... or @dynamic paroperty_name,... syntax. 
*/

/* Parse an Objective-C @synthesize or @dynamic property-implementation
   directive.  KEYWORD says which directive introduced the statement
   (RID_AT_DYNAMIC or @synthesize).

     @synthesize property-name [ = ivar-name ] , ... ;
     @dynamic    property-name , ... ;

   For @dynamic the raw identifier list is passed to
   objc_declare_property_impl with code 2.  For @synthesize each entry
   becomes a TREE_LIST node whose TREE_VALUE is the property name and
   whose TREE_PURPOSE is the backing ivar name (or NULL_TREE when no
   `= ivar' was given); the chain is passed with code 1.  */
static void objc_cp_parser_property_impl (cp_parser *parser, enum rid keyword)
{
  cp_lexer_consume_token (parser->lexer); /* Eat @synthesize or @dynamic */
  if (keyword == RID_AT_DYNAMIC)
    {
      tree identifier_list = cp_parser_objc_identifier_list (parser);
      objc_declare_property_impl (2, identifier_list);
    }
  else
    {
      cp_token *sep = cp_lexer_peek_token (parser->lexer);
      tree list = NULL_TREE;
      do
        {
          tree property;
          tree identifier_list;
          /* On every iteration after the first, SEP is the `,' that
             brought us back around.  */
          if (sep->type == CPP_COMMA)
            cp_lexer_consume_token (parser->lexer); /* Eat ',' */
          property = cp_parser_identifier (parser);
          sep = cp_lexer_peek_token (parser->lexer);
          if (sep->type == CPP_EQ)
            {
              cp_lexer_consume_token (parser->lexer); /* '=' */
              identifier_list = build_tree_list (cp_parser_identifier (parser),
                                                 property);
            }
          else
            identifier_list = build_tree_list (NULL_TREE, property);
          list = chainon (list, identifier_list);
          sep = cp_lexer_peek_token (parser->lexer);
        }
      while (sep->type == CPP_COMMA);
      objc_declare_property_impl (1, list);
    }
  /* Require (or recover to) the terminating `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* APPLE LOCAL end objc new property */

/* This function parses a @property declaration inside an Objective-C
   class interface or its implementation.  An optional parenthesized
   attribute list (readonly, getter=/setter=, readwrite, assign,
   retain, copy, nonatomic) is recorded via objc_set_property_attr
   using that function's numeric attribute codes, and the property
   declaration itself is then parsed by objc_cp_parse_property_decl.  */
static void objc_cp_parser_at_property (cp_parser *parser)
{
  cp_token *token;

  /* Reset any attribute state left over from a previous @property.  */
  objc_set_property_attr (0, NULL_TREE);
  /* Consume @property */
  cp_lexer_consume_token (parser->lexer);
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_OPEN_PAREN)
    {
      /* property has attribute list.  */
      /* Consume '(' */
      cp_lexer_consume_token (parser->lexer);
      /* NOTE(review): on the first pass TOKEN still refers to the `('
         just consumed, so the loop is always entered; an empty list
         `@property ()' is diagnosed through cp_parser_identifier.  */
      while (token->type != CPP_CLOSE_PAREN && token->type != CPP_EOF)
        {
          tree node;
          node = cp_parser_identifier (parser);
          if (node == ridpointers [(int) RID_READONLY])
            {
              /* Do the readonly thing.  */
              objc_set_property_attr (1, NULL_TREE);
            }
          else if (node == ridpointers [(int) RID_GETTER]
                   || node == ridpointers [(int) RID_SETTER])
            {
              /* Do the getter/setter attribute.  */
              token = cp_lexer_consume_token (parser->lexer);
              if (token->type == CPP_EQ)
                {
                  /* APPLE LOCAL radar 4675792 */
                  tree attr_ident = cp_parser_objc_selector (parser);
                  int num;
                  if (node == ridpointers [(int) RID_GETTER])
                    num = 2;
                  else
                    {
                      num = 3;
                      /* Consume the ':' which must always follow the
                         setter name.  */
                      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
                        cp_lexer_consume_token (parser->lexer);
                    }
                  objc_set_property_attr (num, attr_ident);
                }
              else
                {
                  error ("getter/setter attribute must be followed by '='");
                  break;
                }
            }
          /* APPLE LOCAL begin objc new property */
          else if (node == ridpointers [(int) RID_READWRITE])
            {
              objc_set_property_attr (9, NULL_TREE);
            }
          else if (node == ridpointers [(int) RID_ASSIGN])
            {
              objc_set_property_attr (10, NULL_TREE);
            }
          else if (node == ridpointers [(int) RID_RETAIN])
            {
              objc_set_property_attr (11, NULL_TREE);
            }
          else if (node == ridpointers [(int) RID_COPY])
            {
              objc_set_property_attr (12, NULL_TREE);
            }
          /* APPLE LOCAL end objc new property */
          /* APPLE LOCAL begin radar 4947014 - objc atomic property */
          else if (node == ridpointers [(int) RID_NONATOMIC])
            {
              objc_set_property_attr (13, NULL_TREE);
            }
          /* APPLE LOCAL end radar 4947014 - objc atomic property */
          else
            {
              error ("unknown property attribute");
              break;
            }
          /* APPLE LOCAL begin radar 6302949 */
          if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)
              && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
              && cp_lexer_next_token_is_not (parser->lexer, CPP_EOF))
            warning (0, "property attributes must be separated by a comma");
          /* APPLE LOCAL end radar 6302949 */
          if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
            cp_lexer_consume_token (parser->lexer);
          token = cp_lexer_peek_token (parser->lexer);
        }
      if (token->type != CPP_CLOSE_PAREN)
        {
          error ("syntax error in @property's attribute declaration");
        }
      /* Consume ')' */
      cp_lexer_consume_token (parser->lexer);
    }
  /* APPLE LOCAL begin weak_import on property 7496972 */
  note_objc_property_decl_context ();
  objc_cp_parse_property_decl (parser);
  note_end_objc_property_decl_context ();
  /* APPLE LOCAL end weak_import on property 7496972 */
}
/* APPLE LOCAL end C* property (Radar 4436866, 4591909) */

/* This function is called at the end of a statement or declaration.
   If the next token is a semicolon, it is consumed; otherwise, error
   recovery is attempted.  */
static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* Look for the trailing `;'.  */
  if (!cp_parser_require (parser, CPP_SEMICOLON, "`;'"))
    {
      /* If there is additional (erroneous) input, skip to the end of
         the statement.  */
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        cp_lexer_consume_token (parser->lexer);
    }
}

/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.  */
static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  /* NESTING_DEPTH counts open braces; -1 flags "done".  */
  int nesting_depth = 0;

  while (nesting_depth >= 0)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;

        case CPP_SEMICOLON:
          /* Stop if this is an unnested ';'.  */
          if (!nesting_depth)
            nesting_depth = -1;
          break;

        case CPP_CLOSE_BRACE:
          /* Stop if this is an unnested '}', or closes the outermost
             nesting level.  */
          nesting_depth--;
          if (!nesting_depth)
            nesting_depth = -1;
          break;

        case CPP_OPEN_BRACE:
          /* Nest.  */
          nesting_depth++;
          break;

        default:
          break;
        }

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* Skip tokens until a non-nested closing curly brace is the next
   token; the brace itself is NOT consumed.  */
static void
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned nesting_depth = 0;

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.
             */
          return;

        case CPP_CLOSE_BRACE:
          /* If the next token is a non-nested `}', then we have
             reached the end of the current block.  */
          if (nesting_depth-- == 0)
            return;
          break;

        case CPP_OPEN_BRACE:
          /* If the next token is a `{', then we are entering a new
             block.  Consume the entire block.  */
          ++nesting_depth;
          break;

        default:
          break;
        }

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* Consume tokens until we reach the end of the pragma.  The PRAGMA_TOK
   parameter is the PRAGMA token, allowing us to purge the entire pragma
   sequence.  */
static void
cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok)
{
  cp_token *token;

  /* Clear the flag so the lexer resumes normal operation.  */
  parser->lexer->in_pragma = false;

  do
    token = cp_lexer_consume_token (parser->lexer);
  while (token->type != CPP_PRAGMA_EOL && token->type != CPP_EOF);

  /* Ensure that the pragma is not parsed again.  */
  cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
}

/* Require pragma end of line, resyncing with it as necessary.  The
   arguments are as for cp_parser_skip_to_pragma_eol.  */
static void
cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;
  if (!cp_parser_require (parser, CPP_PRAGMA_EOL, "end of line"))
    cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}

/* This is a simple wrapper around make_typename_type.  When the id is
   an unresolved identifier node, we can provide a superior diagnostic
   using cp_parser_diagnose_invalid_type_name.  */
static tree
cp_parser_make_typename_type (cp_parser *parser, tree scope, tree id)
{
  tree result;
  if (TREE_CODE (id) == IDENTIFIER_NODE)
    {
      /* First try quietly; on failure emit the richer diagnostic.  */
      result = make_typename_type (scope, id, typename_type,
                                   /*complain=*/tf_none);
      if (result == error_mark_node)
        cp_parser_diagnose_invalid_type_name (parser, scope, id);
      return result;
    }
  return make_typename_type (scope, id, typename_type, tf_error);
}

/* Create a new C++ parser.  Allocates the cp_parser, attaches the main
   lexer, and initializes every mode flag to its default.  */
static cp_parser *
cp_parser_new (void)
{
  cp_parser *parser;
  cp_lexer *lexer;
  unsigned i;

  /* cp_lexer_new_main is called before calling ggc_alloc because
     cp_lexer_new_main might load a PCH file.  */
  lexer = cp_lexer_new_main ();

  /* Initialize the binops_by_token so that we can get the tree
     directly from the token.  */
  for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++)
    binops_by_token[binops[i].token_type] = binops[i];

  parser = GGC_CNEW (cp_parser);
  parser->lexer = lexer;
  parser->context = cp_parser_context_new (NULL);

  /* For now, we always accept GNU extensions.  */
  parser->allow_gnu_extensions_p = 1;

  /* The `>' token is a greater-than operator, not the end of a
     template-id.  */
  parser->greater_than_is_operator_p = true;

  parser->default_arg_ok_p = true;

  /* We are not parsing a constant-expression.  */
  parser->integral_constant_expression_p = false;
  parser->allow_non_integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;

  /* Local variable names are not forbidden.  */
  parser->local_variables_forbidden_p = false;

  /* We are not processing an `extern "C"' declaration.  */
  parser->in_unbraced_linkage_specification_p = false;

  /* We are not processing a declarator.  */
  parser->in_declarator_p = false;

  /* We are not processing a template-argument-list.  */
  parser->in_template_argument_list_p = false;

  /* We are not in an iteration statement.  */
  parser->in_statement = 0;

  /* We are not in a switch statement.  */
  parser->in_switch_statement_p = false;

  /* We are not parsing a type-id inside an expression.  */
  parser->in_type_id_in_expr_p = false;

  /* Declarations aren't implicitly extern "C".  */
  parser->implicit_extern_c = false;

  /* String literals should be translated to the execution character
     set.  */
  parser->translate_strings_p = true;

  /* We are not parsing a function body.  */
  parser->in_function_body = false;

  /* The unparsed function queue is empty.  */
  parser->unparsed_functions_queues = build_tree_list (NULL_TREE, NULL_TREE);

  /* There are no classes being defined.  */
  parser->num_classes_being_defined = 0;

  /* No template parameters apply.  */
  parser->num_template_parameter_lists = 0;

  return parser;
}

/* Create a cp_lexer structure which will emit the tokens in CACHE
   and push it onto the parser's lexer stack.  This is used for delayed
   parsing of in-class method bodies and default arguments, and should
   not be confused with tentative parsing.  */
static void
cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache)
{
  cp_lexer *lexer = cp_lexer_new_from_tokens (cache);
  lexer->next = parser->lexer;
  parser->lexer = lexer;

  /* Move the current source position to that of the first token in the
     new lexer.  */
  cp_lexer_set_source_position_from_token (lexer->next_token);
}

/* Pop the top lexer off the parser stack.  This is never used for the
   "main" lexer, only for those pushed by
   cp_parser_push_lexer_for_tokens.  */
static void
cp_parser_pop_lexer (cp_parser *parser)
{
  cp_lexer *lexer = parser->lexer;
  parser->lexer = lexer->next;
  cp_lexer_destroy (lexer);

  /* Put the current source position back where it was before this
     lexer was pushed.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}

/* Lexical conventions [gram.lex]  */

/* Parse an identifier.  Returns an IDENTIFIER_NODE representing the
   identifier, or error_mark_node if the next token is not one.  */
static tree
cp_parser_identifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the identifier.  */
  token = cp_parser_require (parser, CPP_NAME, "identifier");
  /* Return the value.  */
  return token ? token->u.value : error_mark_node;
}

/* Parse a sequence of adjacent string constants.  Returns a
   TREE_STRING representing the combined, nul-terminated string
   constant.  If TRANSLATE is true, translate the string to the
   execution character set.  If WIDE_OK is true, a wide string is
   valid here.
   C++98 [lex.string] says that if a narrow string literal token is
   adjacent to a wide string literal token, the behavior is undefined.
   However, C99 6.4.5p4 says that this results in a wide string literal.
   We follow C99 here, for consistency with the C front end.

   This code is largely lifted from lex_string() in c-lex.c.

   FUTURE: ObjC++ will need to handle @-strings here.  */
static tree
cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok)
{
  tree value;
  bool wide = false;
  /* APPLE LOCAL pascal strings */
  bool pascal_p = false;
  size_t count;
  struct obstack str_ob;
  cpp_string str, istr, *strs;
  cp_token *tok;

  tok = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_is_string_literal (tok))
    {
      cp_parser_error (parser, "expected string-literal");
      return error_mark_node;
    }

  /* Try to avoid the overhead of creating and destroying an obstack
     for the common case of just one string.  */
  if (!cp_parser_is_string_literal
      (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      cp_lexer_consume_token (parser->lexer);

      str.text = (const unsigned char *)TREE_STRING_POINTER (tok->u.value);
      str.len = TREE_STRING_LENGTH (tok->u.value);
      count = 1;
      if (tok->type == CPP_WSTRING)
        wide = true;
      /* APPLE LOCAL begin pascal strings */
      /* A leading "\p" escape marks a Pascal string; for a wide string
         the raw spelling starts with the `L' prefix, shifting the
         escape by one position.  */
      if (CPP_OPTION (parse_in, pascal_strings))
        {
          if (wide
              && str.text[0] == 'L'
              && str.text[2] == '\\'
              && str.text[3] == 'p')
            pascal_p = true;
          else if (str.text[1] == '\\' && str.text[2] == 'p')
            pascal_p = true;
        }
      /* APPLE LOCAL end pascal strings */

      strs = &str;
    }
  else
    {
      gcc_obstack_init (&str_ob);
      count = 0;

      do
        {
          cp_lexer_consume_token (parser->lexer);
          count++;
          str.text = (unsigned char *)TREE_STRING_POINTER (tok->u.value);
          str.len = TREE_STRING_LENGTH (tok->u.value);
          if (tok->type == CPP_WSTRING)
            wide = true;
          /* APPLE LOCAL begin pascal strings */
          /* Only the first literal in the concatenation can carry the
             Pascal "\p" marker.  */
          if (CPP_OPTION (parse_in, pascal_strings) && count == 1)
            {
              if (wide
                  && str.text[0] == 'L'
                  && str.text[2] == '\\'
                  && str.text[3] == 'p')
                pascal_p = true;
              else if (str.text[1] == '\\' && str.text[2] == 'p')
                pascal_p = true;
            }
          /* APPLE LOCAL end pascal strings */

          obstack_grow (&str_ob, &str, sizeof (cpp_string));

          tok = cp_lexer_peek_token (parser->lexer);
        }
      while (cp_parser_is_string_literal (tok));

      strs = (cpp_string *) obstack_finish (&str_ob);
    }

  if (wide && !wide_ok)
    {
      cp_parser_error (parser, "a wide string is invalid in this context");
      wide = false;
    }

  if ((translate
       ? cpp_interpret_string : cpp_interpret_string_notranslate)
      /* APPLE LOCAL pascal strings */
      (parse_in, strs, count, &istr, wide, pascal_p))
    {
      value = build_string (istr.len, (char *)istr.text);
      free ((void *)istr.text);
      /* APPLE LOCAL begin pascal strings */
      TREE_TYPE (value) = wide ? wchar_array_type_node
                               : pascal_p ? pascal_string_type_node
                                          : char_array_type_node;
      /* APPLE LOCAL end pascal strings */
      value = fix_string_type (value);
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;

  if (count > 1)
    obstack_free (&str_ob, 0);

  return value;
}

/* Basic concepts [gram.basic]  */

/* Parse a translation-unit.

   translation-unit:
     declaration-seq [opt]

   Returns TRUE if all went well.  */
static bool
cp_parser_translation_unit (cp_parser* parser)
{
  /* The address of the first non-permanent object on the declarator
     obstack.  */
  static void *declarator_obstack_base;

  bool success;

  /* Create the declarator obstack, if necessary.  (This is lazy, one-time
     initialization shared by every translation unit.)  */
  if (!cp_error_declarator)
    {
      gcc_obstack_init (&declarator_obstack);
      /* Create the error declarator.  */
      cp_error_declarator = make_declarator (cdk_error);
      /* Create the empty parameter list.  */
      no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE);
      /* Remember where the base of the declarator obstack lies.  */
      declarator_obstack_base = obstack_next_free (&declarator_obstack);
    }

  cp_parser_declaration_seq_opt (parser);

  /* If there are no tokens left then all went well.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      /* Get rid of the token array; we don't need it any more.  */
      cp_lexer_destroy (parser->lexer);
      parser->lexer = NULL;

      /* This file might have been a context that's implicitly extern
         "C".  If so, pop the lang context.  (Only relevant for PCH.)  */
      if (parser->implicit_extern_c)
        {
          pop_lang_context ();
          parser->implicit_extern_c = false;
        }

      /* Finish up.  */
      finish_translation_unit ();

      success = true;
    }
  else
    {
      cp_parser_error (parser, "expected declaration");
      success = false;
    }

  /* Make sure the declarator obstack was fully cleaned up.  */
  gcc_assert (obstack_next_free (&declarator_obstack)
              == declarator_obstack_base);

  /* All went well.  */
  return success;
}

/* Expressions [gram.expr]  */

/* Parse a primary-expression.

   primary-expression:
     literal
     this
     ( expression )
     id-expression

   GNU Extensions:

   primary-expression:
     ( compound-statement )
     __builtin_va_arg ( assignment-expression , type-id )
     __builtin_offsetof ( type-id , offsetof-expression )
   APPLE LOCAL blocks 6040305 (cf)
     block-literal-expr

   Objective-C++ Extension:

   primary-expression:
     objc-expression

   literal:
     __null

   ADDRESS_P is true iff this expression was immediately preceded by
   "&" and therefore might denote a pointer-to-member.  CAST_P is true
   iff this expression is the target of a cast.  TEMPLATE_ARG_P is true
   iff this expression is a template argument.

   Returns a representation of the expression.  Upon return, *IDK
   indicates what kind of id-expression (if any) was present.  */
static tree
cp_parser_primary_expression (cp_parser *parser,
                              bool address_p,
                              bool cast_p,
                              bool template_arg_p,
                              cp_id_kind *idk)
{
  cp_token *token;
  /* APPLE LOCAL CW asm blocks */
  int atsignhack = 0;

  /* Assume the primary expression is not an id-expression.  */
  *idk = CP_ID_KIND_NONE;

  /* Peek at the next token.
     */
  token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
      /* APPLE LOCAL begin blocks 6040305 (cf) */
    case CPP_XOR:
      /* A `^' introduces a block literal when -fblocks is enabled.  */
      if (flag_blocks)
        {
          tree expr = cp_parser_block_literal_expr (parser);
          return expr;
        }
      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;
      /* APPLE LOCAL end blocks 6040305 (cf) */

      /* literal:
           integer-literal
           character-literal
           floating-literal
           string-literal
           boolean-literal  */
    case CPP_CHAR:
    case CPP_WCHAR:
    case CPP_NUMBER:
      token = cp_lexer_consume_token (parser->lexer);
      /* Floating-point literals are only allowed in an integral
         constant expression if they are cast to an integral or
         enumeration type.  */
      if (TREE_CODE (token->u.value) == REAL_CST
          && parser->integral_constant_expression_p
          && pedantic)
        {
          /* CAST_P will be set even in invalid code like "int(2.7 +
             ...)".  Therefore, we have to check that the next token
             is sure to end the cast.  */
          if (cast_p)
            {
              cp_token *next_token;

              next_token = cp_lexer_peek_token (parser->lexer);
              if (/* The comma at the end of an enumerator-definition.  */
                  next_token->type != CPP_COMMA
                  /* The curly brace at the end of an enum-specifier.  */
                  && next_token->type != CPP_CLOSE_BRACE
                  /* The end of a statement.  */
                  && next_token->type != CPP_SEMICOLON
                  /* The end of the cast-expression.  */
                  && next_token->type != CPP_CLOSE_PAREN
                  /* The end of an array bound.  */
                  && next_token->type != CPP_CLOSE_SQUARE
                  /* The closing ">" in a template-argument-list.  */
                  && (next_token->type != CPP_GREATER
                      || parser->greater_than_is_operator_p))
                cast_p = false;
            }

          /* If we are within a cast, then the constraint that the
             cast is to an integral or enumeration type will be
             checked at that point.  If we are not within a cast, then
             this code is invalid.  */
          if (!cast_p)
            cp_parser_non_integral_constant_expression (parser,
                                                        "floating-point literal");
        }
      return token->u.value;

    case CPP_STRING:
    case CPP_WSTRING:
      /* ??? Should wide strings be allowed when parser->translate_strings_p
         is false (i.e. in attributes)?  If not, we can kill the third
         argument to cp_parser_string_literal.  */
      return cp_parser_string_literal (parser,
                                       parser->translate_strings_p,
                                       true);

    case CPP_OPEN_PAREN:
      {
        tree expr;
        bool saved_greater_than_is_operator_p;

        /* Consume the `('.  */
        cp_lexer_consume_token (parser->lexer);
        /* Within a parenthesized expression, a `>' token is always
           the greater-than operator.  */
        saved_greater_than_is_operator_p
          = parser->greater_than_is_operator_p;
        parser->greater_than_is_operator_p = true;
        /* If we see `( { ' then we are looking at the beginning of
           a GNU statement-expression.  */
        if (cp_parser_allow_gnu_extensions_p (parser)
            && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
          {
            /* Statement-expressions are not allowed by the standard.  */
            if (pedantic)
              pedwarn ("ISO C++ forbids braced-groups within expressions");

            /* And they're not allowed outside of a function-body; you
               cannot, for example, write:

                 int i = ({ int j = 3; j + 1; });

               at class or namespace scope.  */
            if (!parser->in_function_body)
              error ("statement-expressions are allowed only inside functions");
            /* Start the statement-expression.  */
            expr = begin_stmt_expr ();
            /* Parse the compound-statement.  */
            /* APPLE LOCAL radar 5982990 */
            cp_parser_compound_statement (parser, expr, false, false);
            /* Finish up.  */
            expr = finish_stmt_expr (expr, false);
          }
        else
          {
            /* Parse the parenthesized expression.  */
            expr = cp_parser_expression (parser, cast_p);
            /* Let the front end know that this expression was
               enclosed in parentheses.  This matters in case, for
               example, the expression is of the form `A::B', since
               `&A::B' might be a pointer-to-member, but `&(A::B)' is
               not.  */
            finish_parenthesized_expr (expr);
          }
        /* The `>' token might be the end of a template-id or
           template-parameter-list now.  */
        parser->greater_than_is_operator_p
          = saved_greater_than_is_operator_p;
        /* Consume the `)'.  */
        if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
          cp_parser_skip_to_end_of_statement (parser);

        return expr;
      }

    case CPP_KEYWORD:
      switch (token->keyword)
        {
          /* These two are the boolean literals.  */
        case RID_TRUE:
          cp_lexer_consume_token (parser->lexer);
          return boolean_true_node;
        case RID_FALSE:
          cp_lexer_consume_token (parser->lexer);
          return boolean_false_node;

          /* The `__null' literal.  */
        case RID_NULL:
          cp_lexer_consume_token (parser->lexer);
          return null_node;

          /* Recognize the `this' keyword.  */
        case RID_THIS:
          cp_lexer_consume_token (parser->lexer);
          if (parser->local_variables_forbidden_p)
            {
              error ("%<this%> may not be used in this context");
              return error_mark_node;
            }
          /* Pointers cannot appear in constant-expressions.  */
          if (cp_parser_non_integral_constant_expression (parser,
                                                          "`this'"))
            return error_mark_node;
          return finish_this_expr ();

          /* The `operator' keyword can be the beginning of an
             id-expression.  */
        case RID_OPERATOR:
          goto id_expression;

        case RID_FUNCTION_NAME:
        case RID_PRETTY_FUNCTION_NAME:
        case RID_C99_FUNCTION_NAME:
          /* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
             __func__ are the names of variables -- but they are
             treated specially.  Therefore, they are handled here,
             rather than relying on the generic id-expression logic
             below.  Grammatically, these names are id-expressions.

             Consume the token.  */
          token = cp_lexer_consume_token (parser->lexer);
          /* Look up the name.  */
          return finish_fname (token->u.value);

        case RID_VA_ARG:
          {
            tree expression;
            tree type;

            /* The `__builtin_va_arg' construct is used to handle
               `va_arg'.  Consume the `__builtin_va_arg' token.  */
            cp_lexer_consume_token (parser->lexer);
            /* Look for the opening `('.  */
            cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
            /* Now, parse the assignment-expression.  */
            expression = cp_parser_assignment_expression (parser,
                                                          /*cast_p=*/false);
            /* Look for the `,'.  */
            cp_parser_require (parser, CPP_COMMA, "`,'");
            /* Parse the type-id.  */
            type = cp_parser_type_id (parser);
            /* Look for the closing `)'.
               */
            cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
            /* Using `va_arg' in a constant-expression is not
               allowed.  */
            if (cp_parser_non_integral_constant_expression (parser,
                                                            "`va_arg'"))
              return error_mark_node;
            return build_x_va_arg (expression, type);
          }

        case RID_OFFSETOF:
          return cp_parser_builtin_offsetof (parser);

          /* Objective-C++ expressions.  */
        case RID_AT_ENCODE:
        case RID_AT_PROTOCOL:
        case RID_AT_SELECTOR:
          return cp_parser_objc_expression (parser);

        default:
          cp_parser_error (parser, "expected primary-expression");
          return error_mark_node;
        }

      /* APPLE LOCAL begin CW asm blocks */
    case CPP_ATSIGN:
      /* Recognize @-labels and handle them specially later.  (Falls
         through into the id-expression cases below.)  */
      cp_lexer_consume_token (parser->lexer);
      atsignhack = 1;
      token = cp_lexer_peek_token (parser->lexer);
      /* APPLE LOCAL end CW asm blocks */

      /* An id-expression can start with either an identifier, a
         `::' as the beginning of a qualified-id, or the "operator"
         keyword.  */
    case CPP_NAME:
    case CPP_SCOPE:
    case CPP_TEMPLATE_ID:
    case CPP_NESTED_NAME_SPECIFIER:
      {
        tree id_expression;
        tree decl;
        const char *error_msg;
        bool template_p;
        bool done;

      id_expression:
        /* Parse the id-expression.  */
        id_expression
          = cp_parser_id_expression (parser,
                                     /*template_keyword_p=*/false,
                                     /*check_dependency_p=*/true,
                                     &template_p,
                                     /*declarator_p=*/false,
                                     /*optional_p=*/false);
        /* APPLE LOCAL begin CW asm blocks */
        /* Replace the id with an id prefixed with @.  */
        if (atsignhack && id_expression != error_mark_node)
          id_expression = prepend_char_identifier (id_expression, '@');
        /* APPLE LOCAL end CW asm blocks */
        if (id_expression == error_mark_node)
          return error_mark_node;
        token = cp_lexer_peek_token (parser->lexer);
        /* DONE is true when no postfix operator follows, i.e. this
           id-expression is the complete expression.  */
        done = (token->type != CPP_OPEN_SQUARE
                && token->type != CPP_OPEN_PAREN
                && token->type != CPP_DOT
                && token->type != CPP_DEREF
                && token->type != CPP_PLUS_PLUS
                && token->type != CPP_MINUS_MINUS);
        /* If we have a template-id, then no further lookup is
           required.  If the template-id was for a template-class, we
           will sometimes have a TYPE_DECL at this point.  */
        if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
            || TREE_CODE (id_expression) == TYPE_DECL)
          decl = id_expression;
        /* Look up the name.  */
        else
          {
            tree ambiguous_decls;

            decl = cp_parser_lookup_name (parser, id_expression,
                                          none_type,
                                          template_p,
                                          /*is_namespace=*/false,
                                          /*check_dependency=*/true,
                                          &ambiguous_decls);
            /* If the lookup was ambiguous, an error will already have
               been issued.  */
            if (ambiguous_decls)
              return error_mark_node;

            /* APPLE LOCAL begin radar 5277239 */
            if (TREE_CODE (decl) == TYPE_DECL
                && cp_objc_property_reference_prefix (parser, TREE_TYPE (decl)))
              return cp_parser_objc_reference_expression (parser, decl);
            /* APPLE LOCAL end radar 5277239 */

            /* In Objective-C++, an instance variable (ivar) may be
               preferred to whatever cp_parser_lookup_name() found.  */
            decl = objc_lookup_ivar (decl, id_expression);

            /* If name lookup gives us a SCOPE_REF, then the
               qualifying scope was dependent.  */
            if (TREE_CODE (decl) == SCOPE_REF)
              {
                /* At this point, we do not know if DECL is a valid
                   integral constant expression.  We assume that it is
                   in fact such an expression, so that code like:

                     template <int N> struct A {
                       int a[B<N>::i];
                     };

                   is accepted.  At template-instantiation time, we
                   will check that B<N>::i is actually a constant.  */
                return decl;
              }
            /* Check to see if DECL is a local variable in a context
               where that is forbidden.  */
            if (parser->local_variables_forbidden_p
                && local_variable_p (decl))
              {
                /* It might be that we only found DECL because we are
                   trying to be generous with pre-ISO scoping rules.
                   For example, consider:

                     int i;
                     void g() {
                       for (int i = 0; i < 10; ++i) {}
                       extern void f(int j = i);
                     }

                   Here, name look up will originally find the out
                   of scope `i'.  We need to issue a warning message,
                   but then use the global `i'.  */
                decl = check_for_out_of_scope_variable (decl);
                if (local_variable_p (decl))
                  {
                    error ("local variable %qD may not appear in this context",
                           decl);
                    return error_mark_node;
                  }
              }
          }

        decl = (finish_id_expression
                (id_expression, decl, parser->scope,
                 idk,
                 parser->integral_constant_expression_p,
                 parser->allow_non_integral_constant_expression_p,
                 &parser->non_integral_constant_expression_p,
                 template_p, done, address_p,
                 template_arg_p,
                 &error_msg));
        if (error_msg)
          cp_parser_error (parser, error_msg);
        return decl;
      }

      /* Anything else is an error.  */
    default:
      /* APPLE LOCAL begin CW asm blocks */
      if (inside_iasm_block)
        {
          if (token->type == CPP_OPEN_SQUARE)
            {
              tree expr;
              cp_lexer_consume_token (parser->lexer);
              expr = cp_parser_iasm_operand (parser);
              cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
              return iasm_build_bracket (expr, NULL_TREE);
            }
        }
      /* APPLE LOCAL end CW asm blocks */
      /* ...unless we have an Objective-C++ message or string literal,
         that is.  */
      if (c_dialect_objc ()
          && (token->type == CPP_OPEN_SQUARE
              || token->type == CPP_OBJC_STRING))
        return cp_parser_objc_expression (parser);

      cp_parser_error (parser, "expected primary-expression");
      return error_mark_node;
    }
}

/* Parse an id-expression.

   id-expression:
     unqualified-id
     qualified-id

   qualified-id:
     :: [opt] nested-name-specifier template [opt] unqualified-id
     :: identifier
     :: operator-function-id
     :: template-id

   Return a representation of the unqualified portion of the
   identifier.  Sets PARSER->SCOPE to the qualifying scope if there is
   a `::' or nested-name-specifier.

   Often, if the id-expression was a qualified-id, the caller will
   want to make a SCOPE_REF to represent the qualified-id.  This
   function does not do this in order to avoid wastefully creating
   SCOPE_REFs when they are not required.

   If TEMPLATE_KEYWORD_P is true, then we have just seen the
   `template' keyword.

   If CHECK_DEPENDENCY_P is false, then names are looked up inside
   uninstantiated templates.
   If *TEMPLATE_P is non-NULL, it is set to true iff the `template'
   keyword is used to explicitly indicate that the entity named is a
   template.

   If DECLARATOR_P is true, the id-expression is appearing as part of
   a declarator, rather than as part of an expression.  */
static tree
cp_parser_id_expression (cp_parser *parser,
                         bool template_keyword_p,
                         bool check_dependency_p,
                         bool *template_p,
                         bool declarator_p,
                         bool optional_p)
{
  bool global_scope_p;
  bool nested_name_specifier_p;

  /* Assume the `template' keyword was not used.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
                                            /*typename_keyword_p=*/false,
                                            check_dependency_p,
                                            /*type_p=*/false,
                                            declarator_p)
       != NULL_TREE);
  /* If there is a nested-name-specifier, then we are looking at
     the first qualified-id production.  */
  if (nested_name_specifier_p)
    {
      tree saved_scope;
      tree saved_object_scope;
      tree saved_qualifying_scope;
      tree unqualified_id;
      bool is_template;

      /* See if the next token is the `template' keyword.  */
      if (!template_p)
        template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);
      /* Name lookup we do during the processing of the
         unqualified-id might obliterate SCOPE.  */
      saved_scope = parser->scope;
      saved_object_scope = parser->object_scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* Process the final unqualified-id.  */
      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
                                                 check_dependency_p,
                                                 declarator_p,
                                                 /*optional_p=*/false);
      /* Restore the SAVED_SCOPE for our caller.  */
      parser->scope = saved_scope;
      parser->object_scope = saved_object_scope;
      parser->qualifying_scope = saved_qualifying_scope;

      return unqualified_id;
    }
  /* Otherwise, if we are in global scope, then we are looking at one
     of the other qualified-id productions.  */
  else if (global_scope_p)
    {
      cp_token *token;
      tree id;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If it's an identifier, and the next token is not a "<", then
         we can avoid the template-id case.  This is an optimization
         for this common case.  */
      if (token->type == CPP_NAME
          && !cp_parser_nth_token_starts_template_argument_list_p
               (parser, 2))
        return cp_parser_identifier (parser);

      cp_parser_parse_tentatively (parser);
      /* Try a template-id.  */
      id = cp_parser_template_id (parser,
                                  /*template_keyword_p=*/false,
                                  /*check_dependency_p=*/true,
                                  declarator_p);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
        return id;

      /* Peek at the next token.  (Changes in the token buffer may
         have invalidated the pointer obtained above.)  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_NAME:
          return cp_parser_identifier (parser);

        case CPP_KEYWORD:
          if (token->keyword == RID_OPERATOR)
            return cp_parser_operator_function_id (parser);
          /* Fall through.  */

        default:
          cp_parser_error (parser, "expected id-expression");
          return error_mark_node;
        }
    }
  else
    return cp_parser_unqualified_id (parser, template_keyword_p,
                                     /*check_dependency_p=*/true,
                                     declarator_p,
                                     optional_p);
}

/* Parse an unqualified-id.

   unqualified-id:
     identifier
     operator-function-id
     conversion-function-id
     ~ class-name
     template-id

   If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template'
   keyword, in a construct like `A::template ...'.

   Returns a representation of unqualified-id.  For the `identifier'
   production, an IDENTIFIER_NODE is returned.  For the `~ class-name'
   production a BIT_NOT_EXPR is returned; the operand of the
   BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name.
For the other productions, see the documentation accompanying the
   corresponding parsing functions.

   If CHECK_DEPENDENCY_P is false, names are looked up in
   uninstantiated templates.

   If DECLARATOR_P is true, the unqualified-id is appearing as part of
   a declarator, rather than as part of an expression.  */

static tree
cp_parser_unqualified_id (cp_parser* parser,
                          bool template_keyword_p,
                          bool check_dependency_p,
                          bool declarator_p,
                          bool optional_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_NAME:
      {
        tree id;

        /* We don't know yet whether or not this will be a
           template-id.  */
        cp_parser_parse_tentatively (parser);
        /* Try a template-id.  */
        id = cp_parser_template_id (parser, template_keyword_p,
                                    check_dependency_p,
                                    declarator_p);
        /* If it worked, we're done.  */
        if (cp_parser_parse_definitely (parser))
          return id;
        /* Otherwise, it's an ordinary identifier.  */
        return cp_parser_identifier (parser);
      }

    case CPP_TEMPLATE_ID:
      return cp_parser_template_id (parser, template_keyword_p,
                                    check_dependency_p,
                                    declarator_p);

    case CPP_COMPL:
      {
        tree type_decl;
        tree qualifying_scope;
        tree object_scope;
        tree scope;
        bool done;

        /* Consume the `~' token.  */
        cp_lexer_consume_token (parser->lexer);
        /* Parse the class-name.  The standard, as written, seems to
           say that:

             template <typename T> struct S { ~S (); };
             template <typename T> S<T>::~S() {}

           is invalid, since `~' must be followed by a class-name, but
           `S<T>' is dependent, and so not known to be a class.
           That's not right; we need to look in uninstantiated
           templates.  A further complication arises from:

             template <typename T> void f(T t) {
               t.T::~T();
             }

           Here, it is not possible to look up `T' in the scope of `T'
           itself.  We must look in both the current scope, and the
           scope of the containing complete expression.

           Yet another issue is:

             struct S {
               int S;
               ~S();
             };

             S::~S() {}

           The standard does not seem to say that the `S' in `~S'
           should refer to the type `S' and not the data member
           `S::S'.  */

        /* DR 244 says that we look up the name after the "~" in the
           same scope as we looked up the qualifying name.  That idea
           isn't fully worked out; it's more complicated than that.  */
        scope = parser->scope;
        object_scope = parser->object_scope;
        qualifying_scope = parser->qualifying_scope;

        /* Check for invalid scopes.  */
        if (scope == error_mark_node)
          {
            /* Skip the name after the `~' so parsing can continue.  */
            if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
              cp_lexer_consume_token (parser->lexer);
            return error_mark_node;
          }
        if (scope && TREE_CODE (scope) == NAMESPACE_DECL)
          {
            /* Only report the error when committed; when parsing
               tentatively, just record that an error occurred.  */
            if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
              error ("scope %qT before %<~%> is not a class-name", scope);
            cp_parser_simulate_error (parser);
            if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
              cp_lexer_consume_token (parser->lexer);
            return error_mark_node;
          }
        gcc_assert (!scope || TYPE_P (scope));

        /* If the name is of the form "X::~X" it's OK.  */
        token = cp_lexer_peek_token (parser->lexer);
        if (scope
            && token->type == CPP_NAME
            && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
                == CPP_OPEN_PAREN)
            && constructor_name_p (token->u.value, scope))
          {
            cp_lexer_consume_token (parser->lexer);
            return build_nt (BIT_NOT_EXPR, scope);
          }

        /* If there was an explicit qualification (S::~T), first look
           in the scope given by the qualification (i.e., S).  */
        done = false;
        type_decl = NULL_TREE;
        if (scope)
          {
            cp_parser_parse_tentatively (parser);
            type_decl = cp_parser_class_name (parser,
                                              /*typename_keyword_p=*/false,
                                              /*template_keyword_p=*/false,
                                              none_type,
                                              /*check_dependency=*/false,
                                              /*class_head_p=*/false,
                                              declarator_p);
            if (cp_parser_parse_definitely (parser))
              done = true;
          }
        /* In "N::S::~S", look in "N" as well.  */
        if (!done && scope && qualifying_scope)
          {
            cp_parser_parse_tentatively (parser);
            parser->scope = qualifying_scope;
            parser->object_scope = NULL_TREE;
            parser->qualifying_scope = NULL_TREE;
            type_decl = cp_parser_class_name (parser,
                                              /*typename_keyword_p=*/false,
                                              /*template_keyword_p=*/false,
                                              none_type,
                                              /*check_dependency=*/false,
                                              /*class_head_p=*/false,
                                              declarator_p);
            if (cp_parser_parse_definitely (parser))
              done = true;
          }
        /* In "p->S::~T", look in the scope given by "*p" as well.  */
        else if (!done && object_scope)
          {
            cp_parser_parse_tentatively (parser);
            parser->scope = object_scope;
            parser->object_scope = NULL_TREE;
            parser->qualifying_scope = NULL_TREE;
            type_decl = cp_parser_class_name (parser,
                                              /*typename_keyword_p=*/false,
                                              /*template_keyword_p=*/false,
                                              none_type,
                                              /*check_dependency=*/false,
                                              /*class_head_p=*/false,
                                              declarator_p);
            if (cp_parser_parse_definitely (parser))
              done = true;
          }
        /* Look in the surrounding context.  */
        if (!done)
          {
            parser->scope = NULL_TREE;
            parser->object_scope = NULL_TREE;
            parser->qualifying_scope = NULL_TREE;
            type_decl = cp_parser_class_name (parser,
                                              /*typename_keyword_p=*/false,
                                              /*template_keyword_p=*/false,
                                              none_type,
                                              /*check_dependency=*/false,
                                              /*class_head_p=*/false,
                                              declarator_p);
          }
        /* If an error occurred, assume that the name of the
           destructor is the same as the name of the qualifying
           class.  That allows us to keep parsing after running
           into ill-formed destructor names.  */
        if (type_decl == error_mark_node && scope)
          return build_nt (BIT_NOT_EXPR, scope);
        else if (type_decl == error_mark_node)
          return error_mark_node;

        /* Check that destructor name and scope match.  */
        if (declarator_p && scope && !check_dtor_name (scope, type_decl))
          {
            if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
              error ("declaration of %<~%T%> as member of %qT",
                     type_decl, scope);
            cp_parser_simulate_error (parser);
            return error_mark_node;
          }

        /* [class.dtor]

           A typedef-name that names a class shall not be used as the
           identifier in the declarator for a destructor
           declaration.  */
        if (declarator_p
            && !DECL_IMPLICIT_TYPEDEF_P (type_decl)
            && !DECL_SELF_REFERENCE_P (type_decl)
            && !cp_parser_uncommitted_to_tentative_parse_p (parser))
          error ("typedef-name %qD used as destructor declarator",
                 type_decl);

        return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl));
      }

      /* APPLE LOCAL begin CW asm blocks C++ */
    case CPP_NUMBER:
      {
        /* Inside an iasm block an integer literal may serve as an
           identifier (e.g. a numeric label); spell it out as one.  */
        if (flag_iasm_blocks
            && inside_iasm_block
            && TREE_CODE (token->u.value) == INTEGER_CST)
          {
            char buf[60];
            sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED,
                     tree_low_cst (token->u.value, 0));
            cp_lexer_consume_token (parser->lexer);
            return get_identifier (buf);
          }
        goto bad;
      }
      /* APPLE LOCAL end CW asm blocks C++ */

    case CPP_KEYWORD:
      if (token->keyword == RID_OPERATOR)
        {
          tree id;

          /* This could be a template-id, so we try that first.  */
          cp_parser_parse_tentatively (parser);
          /* Try a template-id.  */
          id = cp_parser_template_id (parser, template_keyword_p,
                                      /*check_dependency_p=*/true,
                                      declarator_p);
          /* If that worked, we're done.  */
          if (cp_parser_parse_definitely (parser))
            return id;
          /* We still don't know whether we're looking at an
             operator-function-id or a conversion-function-id.  */
          cp_parser_parse_tentatively (parser);
          /* Try an operator-function-id.  */
          id = cp_parser_operator_function_id (parser);
          /* If that didn't work, try a conversion-function-id.  */
          if (!cp_parser_parse_definitely (parser))
            id = cp_parser_conversion_function_id (parser);

          return id;
        }
      /* Fall through.  */

    default:
      if (optional_p)
        return NULL_TREE;
      /* APPLE LOCAL CW asm blocks C++ */
      /* The CPP_NUMBER case above jumps here when the number cannot
         be treated as an identifier.  */
    bad:
      cp_parser_error (parser, "expected unqualified-id");
      return error_mark_node;
    }
}

/* Parse an (optional) nested-name-specifier.

   nested-name-specifier:
     class-or-namespace-name :: nested-name-specifier [opt]
     class-or-namespace-name :: template nested-name-specifier [opt]

   PARSER->SCOPE should be set appropriately before this function is
   called.  TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in
   effect.  TYPE_P is TRUE if we non-type bindings should be ignored
   in name lookups.

   Sets PARSER->SCOPE to the class (TYPE) or namespace
   (NAMESPACE_DECL) specified by the nested-name-specifier, or leaves
   it unchanged if there is no nested-name-specifier.  Returns the new
   scope iff there is a nested-name-specifier, or NULL_TREE otherwise.

   If IS_DECLARATION is TRUE, the nested-name-specifier is known to be
   part of a declaration and/or decl-specifier.  */

static tree
cp_parser_nested_name_specifier_opt (cp_parser *parser,
                                     bool typename_keyword_p,
                                     bool check_dependency_p,
                                     bool type_p,
                                     bool is_declaration)
{
  bool success = false;
  cp_token_position start = 0;
  cp_token *token;

  /* Remember where the nested-name-specifier starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      start = cp_lexer_token_position (parser->lexer, false);
      push_deferring_access_checks (dk_deferred);
    }

  while (true)
    {
      tree new_scope;
      tree old_scope;
      tree saved_qualifying_scope;
      bool template_keyword_p;

      /* Spot cases that cannot be the beginning of a
         nested-name-specifier.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process
         the already parsed nested-name-specifier.  */
      if (token->type == CPP_NESTED_NAME_SPECIFIER)
        {
          /* Grab the nested-name-specifier and continue the loop.
          */
          cp_parser_pre_parsed_nested_name_specifier (parser);
          /* If we originally encountered this nested-name-specifier
             with IS_DECLARATION set to false, we will not have
             resolved TYPENAME_TYPEs, so we must do so here.  */
          if (is_declaration
              && TREE_CODE (parser->scope) == TYPENAME_TYPE)
            {
              new_scope = resolve_typename_type (parser->scope,
                                                 /*only_current_p=*/false);
              if (new_scope != error_mark_node)
                parser->scope = new_scope;
            }
          success = true;
          continue;
        }

      /* Spot cases that cannot be the beginning of a
         nested-name-specifier.  On the second and subsequent times
         through the loop, we look for the `template' keyword.  */
      if (success && token->keyword == RID_TEMPLATE)
        ;
      /* A template-id can start a nested-name-specifier.  */
      else if (token->type == CPP_TEMPLATE_ID)
        ;
      else
        {
          /* If the next token is not an identifier, then it is
             definitely not a class-or-namespace-name.  */
          if (token->type != CPP_NAME)
            break;
          /* If the following token is neither a `<' (to begin a
             template-id), nor a `::', then we are not looking at a
             nested-name-specifier.  */
          token = cp_lexer_peek_nth_token (parser->lexer, 2);
          if (token->type != CPP_SCOPE
              && !cp_parser_nth_token_starts_template_argument_list_p
                  (parser, 2))
            break;
        }

      /* The nested-name-specifier is optional, so we parse
         tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Look for the optional `template' keyword, if this isn't the
         first time through the loop.  */
      if (success)
        template_keyword_p = cp_parser_optional_template_keyword (parser);
      else
        template_keyword_p = false;

      /* Save the old scope since the name lookup we are about to do
         might destroy it.  */
      old_scope = parser->scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* In a declarator-id like "X<T>::I::Y<T>" we must be able to
         look up names in "X<T>::I" in order to determine that "Y" is
         a template.  So, if we have a typename at this point, we make
         an effort to look through it.  */
      if (is_declaration
          && !typename_keyword_p
          && parser->scope
          && TREE_CODE (parser->scope) == TYPENAME_TYPE)
        parser->scope = resolve_typename_type (parser->scope,
                                               /*only_current_p=*/false);
      /* Parse the qualifying entity.  */
      new_scope
        = cp_parser_class_or_namespace_name (parser,
                                             typename_keyword_p,
                                             template_keyword_p,
                                             check_dependency_p,
                                             type_p,
                                             is_declaration);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, "`::'");

      /* If we found what we wanted, we keep going; otherwise, we're
         done.  */
      if (!cp_parser_parse_definitely (parser))
        {
          bool error_p = false;

          /* Restore the OLD_SCOPE since it was valid before the
             failed attempt at finding the last
             class-or-namespace-name.  */
          parser->scope = old_scope;
          parser->qualifying_scope = saved_qualifying_scope;
          if (cp_parser_uncommitted_to_tentative_parse_p (parser))
            break;
          /* If the next token is an identifier, and the one after
             that is a `::', then any valid interpretation would have
             found a class-or-namespace-name.  */
          while (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
                 && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
                     == CPP_SCOPE)
                 && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
                     != CPP_COMPL))
            {
              token = cp_lexer_consume_token (parser->lexer);
              if (!error_p)
                {
                  if (!token->ambiguous_p)
                    {
                      tree decl;
                      tree ambiguous_decls;
                      decl = cp_parser_lookup_name (parser, token->u.value,
                                                    none_type,
                                                    /*is_template=*/false,
                                                    /*is_namespace=*/false,
                                                    /*check_dependency=*/true,
                                                    &ambiguous_decls);
                      if (TREE_CODE (decl) == TEMPLATE_DECL)
                        error ("%qD used without template parameters", decl);
                      else if (ambiguous_decls)
                        {
                          error ("reference to %qD is ambiguous",
                                 token->u.value);
                          print_candidates (ambiguous_decls);
                          decl = error_mark_node;
                        }
                      else
                        cp_parser_name_lookup_error
                          (parser, token->u.value, decl,
                           "is not a class or namespace");
                    }
                  parser->scope = error_mark_node;
                  error_p = true;
                  /* Treat this as a successful nested-name-specifier
                     due to:

                     [basic.lookup.qual]

                     If the name found is not a class-name (clause
                     _class_) or namespace-name (_namespace.def_), the
                     program is ill-formed.  */
                  success = true;
                }
              /* Consume the `::'; the loop condition guarantees it is
                 the next token.  */
              cp_lexer_consume_token (parser->lexer);
            }
          break;
        }
      /* We've found one valid nested-name-specifier.  */
      success = true;
      /* Name lookup always gives us a DECL.  */
      if (TREE_CODE (new_scope) == TYPE_DECL)
        new_scope = TREE_TYPE (new_scope);
      /* Uses of "template" must be followed by actual templates.  */
      if (template_keyword_p
          && !(CLASS_TYPE_P (new_scope)
               && ((CLASSTYPE_USE_TEMPLATE (new_scope)
                    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope)))
                   || CLASSTYPE_IS_TEMPLATE (new_scope)))
          && !(TREE_CODE (new_scope) == TYPENAME_TYPE
               && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope))
                   == TEMPLATE_ID_EXPR)))
        pedwarn (TYPE_P (new_scope)
                 ? "%qT is not a template"
                 : "%qD is not a template",
                 new_scope);
      /* If it is a class scope, try to complete it; we are about to
         be looking up names inside the class.  */
      if (TYPE_P (new_scope)
          /* Since checking types for dependency can be expensive,
             avoid doing it if the type is already complete.  */
          && !COMPLETE_TYPE_P (new_scope)
          /* Do not try to complete dependent types.  */
          && !dependent_type_p (new_scope))
        new_scope = complete_type (new_scope);
      /* Make sure we look in the right scope the next time through
         the loop.  */
      parser->scope = new_scope;
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER
     token.  That way, should we re-parse the token stream, we will
     not have to repeat the effort required to do the parse, nor will
     we issue duplicate error messages.  */
  if (success && start)
    {
      cp_token *token;

      token = cp_lexer_token_at (parser->lexer, start);
      /* Reset the contents of the START token.  */
      token->type = CPP_NESTED_NAME_SPECIFIER;
      /* Retrieve any deferred checks.  Do not pop this access checks
         yet so the memory will not be reclaimed during token
         replacing below.  */
      token->u.tree_check_value = GGC_CNEW (struct tree_check);
      token->u.tree_check_value->value = parser->scope;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->u.tree_check_value->qualifying_scope =
        parser->qualifying_scope;
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start);
    }

  if (start)
    pop_to_parent_deferring_access_checks ();

  return success ? parser->scope : NULL_TREE;
}

/* Parse a nested-name-specifier.  See
   cp_parser_nested_name_specifier_opt for details.  This function
   behaves identically, except that it will issue an error if no
   nested-name-specifier is present.  */

static tree
cp_parser_nested_name_specifier (cp_parser *parser,
                                 bool typename_keyword_p,
                                 bool check_dependency_p,
                                 bool type_p,
                                 bool is_declaration)
{
  tree scope;

  /* Look for the nested-name-specifier.  */
  scope = cp_parser_nested_name_specifier_opt (parser,
                                               typename_keyword_p,
                                               check_dependency_p,
                                               type_p,
                                               is_declaration);
  /* If it was not present, issue an error message.  */
  if (!scope)
    {
      cp_parser_error (parser, "expected nested-name-specifier");
      parser->scope = NULL_TREE;
    }

  return scope;
}

/* Parse a class-or-namespace-name.

   class-or-namespace-name:
     class-name
     namespace-name

   TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect.
   TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect.
   CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up.
   TYPE_P is TRUE iff the next name should be taken as a class-name,
   even the same name is declared to be another entity in the same
   scope.

   Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL)
   specified by the class-or-namespace-name.  If neither is found the
   ERROR_MARK_NODE is returned.
*/ static tree cp_parser_class_or_namespace_name (cp_parser *parser, bool typename_keyword_p, bool template_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; tree scope; bool only_class_p; /* Before we try to parse the class-name, we must save away the current PARSER->SCOPE since cp_parser_class_name will destroy it. */ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* Try for a class-name first. If the SAVED_SCOPE is a type, then there is no need to look for a namespace-name. */ only_class_p = template_keyword_p || (saved_scope && TYPE_P (saved_scope)); if (!only_class_p) cp_parser_parse_tentatively (parser); scope = cp_parser_class_name (parser, typename_keyword_p, template_keyword_p, type_p ? class_type : none_type, check_dependency_p, /*class_head_p=*/false, is_declaration); /* If that didn't work, try for a namespace-name. */ if (!only_class_p && !cp_parser_parse_definitely (parser)) { /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* If we are not looking at an identifier followed by the scope resolution operator, then this is not part of a nested-name-specifier. (Note that this function is only used to parse the components of a nested-name-specifier.) */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME) || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE) return error_mark_node; scope = cp_parser_namespace_name (parser); } return scope; } /* Parse a postfix-expression. 
   postfix-expression:
     primary-expression
     postfix-expression [ expression ]
     postfix-expression ( expression-list [opt] )
     simple-type-specifier ( expression-list [opt] )
     typename :: [opt] nested-name-specifier identifier
       ( expression-list [opt] )
     typename :: [opt] nested-name-specifier template [opt] template-id
       ( expression-list [opt] )
     postfix-expression . template [opt] id-expression
     postfix-expression -> template [opt] id-expression
     postfix-expression . pseudo-destructor-name
     postfix-expression -> pseudo-destructor-name
     postfix-expression ++
     postfix-expression --
     dynamic_cast < type-id > ( expression )
     static_cast < type-id > ( expression )
     reinterpret_cast < type-id > ( expression )
     const_cast < type-id > ( expression )
     typeid ( expression )
     typeid ( type-id )

   GNU Extension:

   postfix-expression:
     ( type-id ) { initializer-list , [opt] }

   This extension is a GNU version of the C99 compound-literal
   construct.  (The C99 grammar uses `type-name' instead of `type-id',
   but they are essentially the same concept.)

   If ADDRESS_P is true, the postfix expression is the operand of the
   `&' operator.  CAST_P is true if this expression is the target of a
   cast.

   Returns a representation of the expression.  */

static tree
cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p)
{
  cp_token *token;
  enum rid keyword;
  cp_id_kind idk = CP_ID_KIND_NONE;
  tree postfix_expression = NULL_TREE;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some of the productions are determined by keywords.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_DYNCAST:
    case RID_STATCAST:
    case RID_REINTCAST:
    case RID_CONSTCAST:
      {
        tree type;
        tree expression;
        const char *saved_message;

        /* All of these can be handled in the same way from the point
           of view of parsing.  Begin by consuming the token
           identifying the cast.  */
        cp_lexer_consume_token (parser->lexer);

        /* New types cannot be defined in the cast.  */
        saved_message = parser->type_definition_forbidden_message;
        parser->type_definition_forbidden_message
          = "types may not be defined in casts";

        /* Look for the opening `<'.  */
        cp_parser_require (parser, CPP_LESS, "`<'");
        /* Parse the type to which we are casting.  */
        type = cp_parser_type_id (parser);
        /* Look for the closing `>'.  */
        cp_parser_require (parser, CPP_GREATER, "`>'");
        /* Restore the old message.  */
        parser->type_definition_forbidden_message = saved_message;

        /* And the expression which is being cast.  */
        cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
        expression = cp_parser_expression (parser, /*cast_p=*/true);
        cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

        /* Only type conversions to integral or enumeration types
           can be used in constant-expressions.  */
        if (!cast_valid_in_integral_constant_expression_p (type)
            && (cp_parser_non_integral_constant_expression
                (parser,
                 "a cast to a type other than an integral or "
                 "enumeration type")))
          return error_mark_node;

        switch (keyword)
          {
          case RID_DYNCAST:
            postfix_expression
              = build_dynamic_cast (type, expression);
            break;
          case RID_STATCAST:
            postfix_expression
              = build_static_cast (type, expression);
            break;
          case RID_REINTCAST:
            postfix_expression
              = build_reinterpret_cast (type, expression);
            break;
          case RID_CONSTCAST:
            postfix_expression
              = build_const_cast (type, expression);
            break;
          default:
            gcc_unreachable ();
          }
      }
      break;

    case RID_TYPEID:
      {
        tree type;
        const char *saved_message;
        bool saved_in_type_id_in_expr_p;

        /* Consume the `typeid' token.  */
        cp_lexer_consume_token (parser->lexer);
        /* Look for the `(' token.  */
        cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
        /* Types cannot be defined in a `typeid' expression.  */
        saved_message = parser->type_definition_forbidden_message;
        parser->type_definition_forbidden_message
          = "types may not be defined in a `typeid\' expression";
        /* We can't be sure yet whether we're looking at a type-id or an
           expression.  */
        cp_parser_parse_tentatively (parser);
        /* Try a type-id first.  */
        saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
        parser->in_type_id_in_expr_p = true;
        type = cp_parser_type_id (parser);
        parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
        /* Look for the `)' token.  Otherwise, we can't be sure that
           we're not looking at an expression: consider `typeid (int
           (3))', for example.  */
        cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
        /* If all went well, simply lookup the type-id.  */
        if (cp_parser_parse_definitely (parser))
          postfix_expression = get_typeid (type);
        /* Otherwise, fall back to the expression variant.  */
        else
          {
            tree expression;

            /* Look for an expression.  */
            expression = cp_parser_expression (parser, /*cast_p=*/false);
            /* Compute its typeid.  */
            postfix_expression = build_typeid (expression);
            /* Look for the `)' token.  */
            cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
          }
        /* Restore the saved message.  */
        parser->type_definition_forbidden_message = saved_message;
        /* `typeid' may not appear in an integral constant
           expression.  */
        if (cp_parser_non_integral_constant_expression(parser,
                                                       "`typeid' operator"))
          return error_mark_node;
      }
      break;

    case RID_TYPENAME:
      {
        tree type;
        /* The syntax permitted here is the same permitted for an
           elaborated-type-specifier.  */
        type = cp_parser_elaborated_type_specifier (parser,
                                                    /*is_friend=*/false,
                                                    /*is_declaration=*/false);
        postfix_expression = cp_parser_functional_cast (parser, type);
      }
      break;

    default:
      {
        tree type;

        /* If the next thing is a simple-type-specifier, we may be
           looking at a functional cast.  We could also be looking at
           an id-expression.  So, we try the functional cast, and if
           that doesn't work we fall back to the
           primary-expression.  */
        cp_parser_parse_tentatively (parser);
        /* Look for the simple-type-specifier.  */
        type = cp_parser_simple_type_specifier (parser,
                                                /*decl_specs=*/NULL,
                                                CP_PARSER_FLAGS_NONE);
        /* Parse the cast itself.  */
        if (!cp_parser_error_occurred (parser))
          postfix_expression
            = cp_parser_functional_cast (parser, type);
        /* If that worked, we're done.  */
        if (cp_parser_parse_definitely (parser))
          break;

        /* If the functional-cast didn't work out, try a
           compound-literal.  */
        if (cp_parser_allow_gnu_extensions_p (parser)
            && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
          {
            VEC(constructor_elt,gc) *initializer_list = NULL;
            bool saved_in_type_id_in_expr_p;

            cp_parser_parse_tentatively (parser);
            /* Consume the `('.  */
            cp_lexer_consume_token (parser->lexer);
            /* Parse the type.  */
            saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
            parser->in_type_id_in_expr_p = true;
            type = cp_parser_type_id (parser);
            parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
            /* Look for the `)'.  */
            cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
            /* Look for the `{'.  */
            cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
            /* If things aren't going well, there's no need to
               keep going.  */
            if (!cp_parser_error_occurred (parser))
              {
                bool non_constant_p;
                /* Parse the initializer-list.  */
                initializer_list
                  = cp_parser_initializer_list (parser, &non_constant_p);
                /* Allow a trailing `,'.  */
                if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
                  cp_lexer_consume_token (parser->lexer);
                /* Look for the final `}'.  */
                cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
              }
            /* If that worked, we're definitely looking at a
               compound-literal expression.  */
            if (cp_parser_parse_definitely (parser))
              {
                /* Warn the user that a compound literal is not
                   allowed in standard C++.  */
                /* APPLE LOCAL Altivec initializers 3068233 */
                if (pedantic && TREE_CODE (type) != VECTOR_TYPE)
                  pedwarn ("ISO C++ forbids compound-literals");
                /* For simplicity, we disallow compound literals in
                   constant-expressions.  We could allow compound
                   literals of integer type, whose initializer was a
                   constant, in constant expressions.  Permitting that
                   usage, as a further extension, would not change the
                   meaning of any currently accepted programs.  (Of
                   course, as compound literals are not part of ISO
                   C++, the standard has nothing to say.)  */
                if (cp_parser_non_integral_constant_expression
                    (parser, "non-constant compound literals"))
                  {
                    postfix_expression = error_mark_node;
                    break;
                  }
                /* Form the representation of the compound-literal.  */
                postfix_expression
                  = finish_compound_literal (type, initializer_list);
                break;
              }
          }

        /* It must be a primary-expression.  */
        postfix_expression
          = cp_parser_primary_expression (parser,
                                          address_p,
                                          cast_p,
                                          /*template_arg_p=*/false,
                                          &idk);
      }
      break;
    }

  /* Keep looping until the postfix-expression is complete.  */
  while (true)
    {
      if (idk == CP_ID_KIND_UNQUALIFIED
          && TREE_CODE (postfix_expression) == IDENTIFIER_NODE
          && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
        /* It is not a Koenig lookup function call.  */
        postfix_expression
          = unqualified_name_lookup_error (postfix_expression);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_OPEN_SQUARE:
          postfix_expression
            = cp_parser_postfix_open_square_expression (parser,
                                                        postfix_expression,
                                                        false);
          idk = CP_ID_KIND_NONE;
          break;

        case CPP_OPEN_PAREN:
          /* postfix-expression ( expression-list [opt] ) */
          {
            bool koenig_p;
            bool is_builtin_constant_p;
            bool saved_integral_constant_expression_p = false;
            bool saved_non_integral_constant_expression_p = false;
            tree args;

            is_builtin_constant_p
              = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression);
            if (is_builtin_constant_p)
              {
                /* The whole point of __builtin_constant_p is to allow
                   non-constant expressions to appear as arguments.  */
                saved_integral_constant_expression_p
                  = parser->integral_constant_expression_p;
                saved_non_integral_constant_expression_p
                  = parser->non_integral_constant_expression_p;
                parser->integral_constant_expression_p = false;
              }
            args = (cp_parser_parenthesized_expression_list
                    (parser, /*is_attribute_list=*/false,
                     /*cast_p=*/false,
                     /*non_constant_p=*/NULL));
            if (is_builtin_constant_p)
              {
                parser->integral_constant_expression_p
                  = saved_integral_constant_expression_p;
                parser->non_integral_constant_expression_p
                  = saved_non_integral_constant_expression_p;
              }

            if (args == error_mark_node)
              {
                postfix_expression = error_mark_node;
                break;
              }

            /* Function calls are not permitted in
               constant-expressions.  */
            if (! builtin_valid_in_constant_expr_p (postfix_expression)
                && cp_parser_non_integral_constant_expression (parser,
                                                               "a function call"))
              {
                postfix_expression = error_mark_node;
                break;
              }

            koenig_p = false;
            if (idk == CP_ID_KIND_UNQUALIFIED)
              {
                if (TREE_CODE (postfix_expression) == IDENTIFIER_NODE)
                  {
                    if (args)
                      {
                        koenig_p = true;
                        postfix_expression
                          = perform_koenig_lookup (postfix_expression, args);
                      }
                    else
                      postfix_expression
                        = unqualified_fn_lookup_error (postfix_expression);
                  }
                /* We do not perform argument-dependent lookup if
                   normal lookup finds a non-function, in accordance
                   with the expected resolution of DR 218.  */
                else if (args && is_overloaded_fn (postfix_expression))
                  {
                    tree fn = get_first_fn (postfix_expression);

                    if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
                      fn = OVL_CURRENT (TREE_OPERAND (fn, 0));

                    /* Only do argument dependent lookup if regular
                       lookup does not find a set of member functions.
                       [basic.lookup.koenig]/2a  */
                    if (!DECL_FUNCTION_MEMBER_P (fn))
                      {
                        koenig_p = true;
                        postfix_expression
                          = perform_koenig_lookup (postfix_expression, args);
                      }
                  }
              }

            if (TREE_CODE (postfix_expression) == COMPONENT_REF)
              {
                tree instance = TREE_OPERAND (postfix_expression, 0);
                tree fn = TREE_OPERAND (postfix_expression, 1);

                if (processing_template_decl
                    && (type_dependent_expression_p (instance)
                        || (!BASELINK_P (fn)
                            && TREE_CODE (fn) != FIELD_DECL)
                        || type_dependent_expression_p (fn)
                        || any_type_dependent_arguments_p (args)))
                  {
                    postfix_expression
                      = build_min_nt (CALL_EXPR, postfix_expression,
                                      args, NULL_TREE);
                    break;
                  }

                if (BASELINK_P (fn))
                  postfix_expression
                    = (build_new_method_call
                       (instance, fn, args, NULL_TREE,
                        (idk == CP_ID_KIND_QUALIFIED
                         ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL),
                        /*fn_p=*/NULL));
                else
                  postfix_expression
                    = finish_call_expr (postfix_expression, args,
                                        /*disallow_virtual=*/false,
                                        /*koenig_p=*/false);
              }
            else if (TREE_CODE (postfix_expression) == OFFSET_REF
                     || TREE_CODE (postfix_expression) == MEMBER_REF
                     || TREE_CODE (postfix_expression) == DOTSTAR_EXPR)
              postfix_expression = (build_offset_ref_call_from_tree
                                    (postfix_expression, args));
            else if (idk == CP_ID_KIND_QUALIFIED)
              /* A call to a static class member, or a
                 namespace-scope function.  */
              postfix_expression
                = finish_call_expr (postfix_expression, args,
                                    /*disallow_virtual=*/true,
                                    koenig_p);
            else
              /* All other function calls.  */
              postfix_expression
                = finish_call_expr (postfix_expression, args,
                                    /*disallow_virtual=*/false,
                                    koenig_p);

            /* The POSTFIX_EXPRESSION is certainly no longer an id.  */
            idk = CP_ID_KIND_NONE;
          }
          break;

        case CPP_DOT:
        case CPP_DEREF:
          /* postfix-expression . template [opt] id-expression
             postfix-expression . pseudo-destructor-name
             postfix-expression -> template [opt] id-expression
             postfix-expression -> pseudo-destructor-name */

          /* Consume the `.' or `->' operator.  */
          cp_lexer_consume_token (parser->lexer);

          postfix_expression
            = cp_parser_postfix_dot_deref_expression (parser, token->type,
                                                      postfix_expression,
                                                      false, &idk);
          break;

        case CPP_PLUS_PLUS:
          /* postfix-expression ++  */
          /* Consume the `++' token.  */
          cp_lexer_consume_token (parser->lexer);
          /* Generate a representation for the complete expression.  */
          postfix_expression
            = finish_increment_expr (postfix_expression,
                                     POSTINCREMENT_EXPR);
          /* Increments may not appear in constant-expressions.  */
          if (cp_parser_non_integral_constant_expression (parser,
                                                          "an increment"))
            postfix_expression = error_mark_node;
          idk = CP_ID_KIND_NONE;
          break;

        case CPP_MINUS_MINUS:
          /* postfix-expression -- */
          /* Consume the `--' token.  */
          cp_lexer_consume_token (parser->lexer);
          /* Generate a representation for the complete expression.  */
          postfix_expression
            = finish_increment_expr (postfix_expression,
                                     POSTDECREMENT_EXPR);
          /* Decrements may not appear in constant-expressions.  */
          if (cp_parser_non_integral_constant_expression (parser,
                                                          "a decrement"))
            postfix_expression = error_mark_node;
          idk = CP_ID_KIND_NONE;
          break;

        default:
          return postfix_expression;
        }
    }

  /* We should never get here.  */
  gcc_unreachable ();
  return error_mark_node;
}

/* A subroutine of cp_parser_postfix_expression that also gets hijacked
   by cp_parser_builtin_offsetof.  We're looking for

     postfix-expression [ expression ]

   FOR_OFFSETOF is set if we're being called in that context, which
   changes how we deal with integer constant expressions.  */

static tree
cp_parser_postfix_open_square_expression (cp_parser *parser,
                                          tree postfix_expression,
                                          bool for_offsetof)
{
  tree index;

  /* Consume the `[' token.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the index expression.  */
  /* ??? For offsetof, there is a question of what to allow here.  If
     offsetof is not being used in an integral constant expression context,
     then we *could* get the right answer by computing the value at runtime.
If we are in an integral constant expression context, then we might could accept any constant expression; hard to say without analysis. Rather than open the barn door too wide right away, allow only integer constant expressions here. */ if (for_offsetof) index = cp_parser_constant_expression (parser, false, NULL); else index = cp_parser_expression (parser, /*cast_p=*/false); /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'"); /* APPLE LOCAL begin CW asm blocks */ if (inside_iasm_block) if (TREE_CODE (postfix_expression) == BRACKET_EXPR || TREE_CODE (index) == IDENTIFIER_NODE || TREE_TYPE (index) == NULL_TREE) return iasm_build_bracket (postfix_expression, index); /* APPLE LOCAL end CW asm blocks */ /* Build the ARRAY_REF. */ postfix_expression = grok_array_decl (postfix_expression, index); /* When not doing offsetof, array references are not permitted in constant-expressions. */ if (!for_offsetof && (cp_parser_non_integral_constant_expression (parser, "an array reference"))) postfix_expression = error_mark_node; return postfix_expression; } /* A subroutine of cp_parser_postfix_expression that also gets hijacked by cp_parser_builtin_offsetof. We're looking for postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name FOR_OFFSETOF is set if we're being called in that context. That sorta limits what of the above we'll actually accept, but nevermind. TOKEN_TYPE is the "." or "->" token, which will already have been removed from the stream. */ static tree cp_parser_postfix_dot_deref_expression (cp_parser *parser, enum cpp_ttype token_type, tree postfix_expression, bool for_offsetof, cp_id_kind *idk) { tree name; bool dependent_p; bool pseudo_destructor_p; tree scope = NULL_TREE; /* If this is a `->' operator, dereference the pointer. 
*/
  if (token_type == CPP_DEREF)
    postfix_expression = build_x_arrow (postfix_expression);
  /* Check to see whether or not the expression is type-dependent.  */
  dependent_p = type_dependent_expression_p (postfix_expression);
  /* The identifier following the `->' or `.' is not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  *idk = CP_ID_KIND_NONE;
  /* Enter the scope corresponding to the type of the object
     given by the POSTFIX_EXPRESSION.  */
  if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE)
    {
      scope = TREE_TYPE (postfix_expression);
      /* According to the standard, no expression should ever have
	 reference type.  Unfortunately, we do not currently match
	 the standard in this respect in that our internal
	 representation of an expression may have reference type even
	 when the standard says it does not.  Therefore, we have to
	 manually obtain the underlying type here.  */
      scope = non_reference (scope);
      /* The type of the POSTFIX_EXPRESSION must be complete.  */
      if (scope == unknown_type_node)
	{
	  error ("%qE does not have class type", postfix_expression);
	  scope = NULL_TREE;
	}
      else
	scope = complete_type_or_else (scope, NULL_TREE);
      /* Let the name lookup machinery know that we are processing a
	 class member access expression.  */
      parser->context->object_type = scope;
      /* If something went wrong, we want to be able to discern that case,
	 as opposed to the case where there was no SCOPE due to the type
	 of expression being dependent.  */
      if (!scope)
	scope = error_mark_node;
      /* If the SCOPE was erroneous, make the various semantic analysis
	 functions exit quickly -- and without issuing additional error
	 messages.  */
      if (scope == error_mark_node)
	postfix_expression = error_mark_node;
    }

  /* Assume this expression is not a pseudo-destructor access.  */
  pseudo_destructor_p = false;

  /* If the SCOPE is a scalar type, then, if this is a valid program,
     we must be looking at a pseudo-destructor-name.  Parse it
     tentatively, since the `~' has not been seen yet.  */
  if (scope && SCALAR_TYPE_P (scope))
    {
      tree s;
      tree type;

      cp_parser_parse_tentatively (parser);
      /* Parse the pseudo-destructor-name.  */
      s = NULL_TREE;
      cp_parser_pseudo_destructor_name (parser, &s, &type);
      if (cp_parser_parse_definitely (parser))
	{
	  pseudo_destructor_p = true;
	  postfix_expression
	    = finish_pseudo_destructor_expr (postfix_expression,
					     s, TREE_TYPE (type));
	}
    }

  if (!pseudo_destructor_p)
    {
      /* If the SCOPE is not a scalar type, we are looking at an
	 ordinary class member access expression, rather than a
	 pseudo-destructor-name.  */
      bool template_p;
      /* Parse the id-expression.  */
      name = (cp_parser_id_expression
	      (parser,
	       cp_parser_optional_template_keyword (parser),
	       /*check_dependency_p=*/true,
	       &template_p,
	       /*declarator_p=*/false,
	       /*optional_p=*/false));
      /* In general, build a SCOPE_REF if the member name is qualified.
	 However, if the name was not dependent and has already been
	 resolved; there is no need to build the SCOPE_REF.  For example;

	   struct X { void f(); };
	   template <typename T> void f(T* t) { t->X::f(); }

	 Even though "t" is dependent, "X::f" is not and has been
	 resolved to a BASELINK; there is no need to include scope
	 information.  */

      /* But we do need to remember that there was an explicit scope for
	 virtual function calls.  */
      if (parser->scope)
	*idk = CP_ID_KIND_QUALIFIED;

      /* If the name is a template-id that names a type, we will get a
	 TYPE_DECL here.  That is invalid code.  */
      if (TREE_CODE (name) == TYPE_DECL)
	{
	  error ("invalid use of %qD", name);
	  postfix_expression = error_mark_node;
	}
      else
	{
	  if (name != error_mark_node && !BASELINK_P (name) && parser->scope)
	    {
	      /* The name was qualified and unresolved; wrap it in a
		 SCOPE_REF and clear the parser's scope state, which has
		 now been consumed.  */
	      name = build_qualified_name (/*type=*/NULL_TREE,
					   parser->scope,
					   name,
					   template_p);
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	    }
	  if (scope && name && BASELINK_P (name))
	    adjust_result_of_qualified_name_lookup
	      (name, BINFO_TYPE (BASELINK_ACCESS_BINFO (name)), scope);
	  postfix_expression
	    = finish_class_member_access_expr (postfix_expression, name,
					       template_p);
	}
    }

  /* We no longer need to look up names in the scope of the object on
     the left-hand side of the `.' or `->' operator.  */
  parser->context->object_type = NULL_TREE;

  /* Outside of offsetof, these operators may not appear in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression
	  (parser, token_type == CPP_DEREF ? "'->'" : "`.'")))
    postfix_expression = error_mark_node;

  return postfix_expression;
}

/* Parse a parenthesized expression-list.

   expression-list:
     assignment-expression
     expression-list, assignment-expression

   attribute-list:
     expression-list
     identifier
     identifier, expression-list

   CAST_P is true if this expression is the target of a cast.

   Returns a TREE_LIST.  The TREE_VALUE of each node is a
   representation of an assignment-expression.  Note that a TREE_LIST
   is returned even if there is only a single expression in the list.
   error_mark_node is returned if the ( and or ) are missing.
   NULL_TREE is returned on no expressions.  The parentheses are
   eaten.  IS_ATTRIBUTE_LIST is true if this is really an attribute
   list being parsed.  If NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P
   indicates whether or not all of the expressions in the list were
   constant.
*/

static tree
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 bool is_attribute_list,
					 bool cast_p,
					 bool *non_constant_p)
{
  tree expression_list = NULL_TREE;
  /* In attribute lists, non-dependent arguments are folded so the
     attribute machinery sees constants.  */
  bool fold_expr_p = is_attribute_list;
  tree identifier = NULL_TREE;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return error_mark_node;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    /* Parse the next assignment-expression.  */
	    if (non_constant_p)
	      {
		bool expr_non_constant_p;
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      expr = cp_parser_assignment_expression (parser, cast_p);

	    if (fold_expr_p)
	      expr = fold_non_dependent_expr (expr);

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found.  That gives better errors.  */
	    expression_list = tree_cons (NULL_TREE, expr, expression_list);

	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = false;

      get_comma:;
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.
*/
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     /*is_declaration=*/true);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, "`::'");
    }
  /* If the next token is not a `~', then there might be some
     additional qualification.  */
  else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL))
    {
      /* Look for the type-name.  */
      *scope = TREE_TYPE (cp_parser_type_name (parser));

      if (*scope == error_mark_node)
	return;

      /* If we don't have ::~, then something has gone wrong.  Since
	 the only caller of this function is looking for something
	 after `.' or `->' after a scalar type, most likely the
	 program is trying to get a member of a non-aggregate
	 type.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_COMPL)
	{
	  cp_parser_error (parser, "request for member of non-aggregate type");
	  return;
	}

      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, "`::'");
    }
  else
    *scope = NULL_TREE;

  /* Look for the `~'.  */
  cp_parser_require (parser, CPP_COMPL, "`~'");
  /* Look for the type-name again.  We are not responsible for
     checking that it matches the first type-name.  */
  *type = cp_parser_type_name (parser);
}

/* Parse a unary-expression.

   unary-expression:
     postfix-expression
     ++ cast-expression
     -- cast-expression
     unary-operator cast-expression
     sizeof unary-expression
     sizeof ( type-id )
     new-expression
     delete-expression

   GNU Extensions:

   unary-expression:
     __extension__ cast-expression
     __alignof__ unary-expression
     __alignof__ ( type-id )
     __real__ cast-expression
     __imag__ cast-expression
     && identifier

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.  CAST_P is true if this expression is
   the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	/* APPLE LOCAL begin CW asm blocks */
	case RID_SIZEOF:
	  /* Inside a CW asm block, `sizeof' is not handled here; fall
	     out of the switch.  Otherwise fall through to the shared
	     sizeof/alignof handling below.  */
	  if (inside_iasm_block)
	    break;
	case RID_ALIGNOF:
	/* APPLE LOCAL end CW asm blocks */
	  {
	    tree operand;
	    enum tree_code op;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the operand.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    if (TYPE_P (operand))
	      return cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      return cxx_sizeof_or_alignof_expr (operand, op);
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op ((keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression);
	  }
	  /* Not reached; the braces above always return.  */
	  break;

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.
*/
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* APPLE LOCAL begin CW asm blocks */
  /* In the context of CW asm block, '*' followed by '+' or '-' is for
     relative branch syntax.  This is to allow "b *+8" which is
     disallwed by darwin's assembler but nevertheless is needed to be
     compatible with CW tools.  */
  if (inside_iasm_block && unary_operator == INDIRECT_REF)
    {
      cp_token *token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	unary_operator = ERROR_MARK;
    }
  /* APPLE LOCAL end CW asm blocks */
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  return finish_label_address_expr (identifier);
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      /* Set to the diagnostic string when the operator is not
	 permitted in an integral constant expression; NULL means the
	 operator is always permitted.  */
      const char *non_constant_p = NULL;

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = "`*'";
	  expression = build_x_indirect_ref (cast_expression, "unary *");
	  break;

	case ADDR_EXPR:
	  non_constant_p = "`&'";
	  /* Fall through.  */
	case BIT_NOT_EXPR:
	  /* APPLE LOCAL begin CW asm blocks */
	  if (inside_iasm_block
	      && unary_operator == ADDR_EXPR
	      && TREE_CODE (cast_expression) == LABEL_DECL)
	    {
	      expression
		= finish_label_address_expr (DECL_NAME (cast_expression));
	      break;
	    }
	  /* APPLE LOCAL end CW asm blocks */
	  expression = build_x_unary_op (unary_operator, cast_expression);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = (unary_operator == PREINCREMENT_EXPR
			    ? "`++'" : "`--'");
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  /* APPLE LOCAL begin CW asm blocks */
	  /* Untyped iasm operands get a bare tree node rather than
	     full semantic analysis.  */
	  if (inside_iasm_block && TREE_TYPE (cast_expression) == 0)
	    {
	      expression = build1 (unary_operator, NULL_TREE,
				   cast_expression);
	      break;
	    }
	  /* APPLE LOCAL end CW asm blocks */
	  expression = finish_unary_op_expr (unary_operator,
					     cast_expression);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (non_constant_p
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  /* APPLE LOCAL begin CW asm blocks */
  /* Postfix expressions in CW asm are more restricted and handled
     quite differently, so diverge from the usual expression
     precedence sequence here.  */
  if (inside_iasm_block)
    return cp_parser_iasm_postfix_expression (parser, address_p, cast_p);
  /* APPLE LOCAL end CW asm blocks */

  return cp_parser_postfix_expression (parser, address_p, cast_p);
}

/* Returns ERROR_MARK if TOKEN is not a unary-operator.  If TOKEN is a
   unary-operator, the corresponding tree code is returned.
*/

static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  switch (token->type)
    {
    case CPP_MULT:
      return INDIRECT_REF;

    case CPP_AND:
      return ADDR_EXPR;

    case CPP_PLUS:
      return UNARY_PLUS_EXPR;

    case CPP_MINUS:
      return NEGATE_EXPR;

    case CPP_NOT:
      return TRUTH_NOT_EXPR;

    case CPP_COMPL:
      return BIT_NOT_EXPR;

    /* APPLE LOCAL begin CW asm blocks */
    case CPP_NAME:
      /* MS-style asm treats the identifier `offset' (any case) as the
	 address-of operator.  When the condition fails, fall through
	 to the default and report no operator.  */
      if (iasm_state >= iasm_decls
	  && flag_ms_asms
	  && strcasecmp (IDENTIFIER_POINTER (token->u.value), "offset") == 0)
	return ADDR_EXPR;
    /* APPLE LOCAL end CW asm blocks */

    default:
      return ERROR_MARK;
    }
}

/* Parse a new-expression.

   new-expression:
     :: [opt] new new-placement [opt] new-type-id new-initializer [opt]
     :: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]

   Returns a representation of the expression.  */

static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  tree placement;
  tree type;
  tree initializer;
  tree nelts;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, "`new'");
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct, so parse the placement tentatively.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.  */
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  */
  if (!cp_parser_parse_definitely (parser))
    placement = NULL_TREE;

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the type-id.  */
      type = cp_parser_type_id (parser);
      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allowed this, so we check and emit a sensible
	 error message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error ("array bound forbidden after parenthesized type-id");
	  inform ("try removing the parentheses around the type-id");
	  /* Consume (and discard) the bogus declarator so parsing can
	     continue past it.  */
	  cp_parser_direct_new_declarator (parser);
	}
      nelts = NULL_TREE;
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(', then we have a new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL_TREE;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, "`new'"))
    return error_mark_node;

  /* Create a representation of the new-expression.  */
  return build_new (placement, type, nelts, initializer, global_scope_p);
}

/* Parse a new-placement.

   new-placement:
     ( expression-list )

   Returns the same representation as for an expression-list.  */

static tree
cp_parser_new_placement (cp_parser* parser)
{
  tree expression_list;

  /* Parse the expression-list.  */
  expression_list = (cp_parser_parenthesized_expression_list
		     (parser, false, /*cast_p=*/false,
		      /*non_constant_p=*/NULL));

  return expression_list;
}

/* Parse a new-type-id.

   new-type-id:
     type-specifier-seq new-declarator [opt]

   Returns the TYPE allocated.  If the new-type-id indicates an array
   type, *NELTS is set to the number of elements in the last array
   bound; the TYPE will not include the last array bound.  */

static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;
  tree type;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)
*/
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = "types may not be defined in a new-type-id";
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.  First step over any
     leading pointer/pointer-to-member declarators...  */
  declarator = new_declarator;
  outer_declarator = NULL;
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  /* ...then walk to the innermost array declarator.  */
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      /* Peel the outermost array bound off the declarator; it is
	 returned separately via *NELTS.  */
      *nelts = declarator->u.array.bounds;
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;

      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  type = groktypename (&type_specifier_seq, new_declarator);
  if (TREE_CODE (type) == ARRAY_TYPE && *nelts == NULL_TREE)
    {
      *nelts = array_type_nelts_top (type);
      type = TREE_TYPE (type);
    }
  return type;
}

/* Parse an (optional) new-declarator.

   new-declarator:
     ptr-operator new-declarator [opt]
     direct-new-declarator

   Returns the declarator.  */

static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &type, &cv_quals);
  /* If that worked, look for more new-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator.  */
      declarator = cp_parser_new_declarator_opt (parser);

      /* Create the representation of the declarator.  */
      if (type)
	declarator = make_ptrmem_declarator (cv_quals, type, declarator);
      else if (code == INDIRECT_REF)
	declarator = make_pointer_declarator (cv_quals, declarator);
      else
	declarator = make_reference_declarator (cv_quals, declarator);

      return declarator;
    }

  /* If the next token is a `[', there is a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);

  return NULL;
}

/* Parse a direct-new-declarator.

   direct-new-declarator:
     [ expression ]
     direct-new-declarator [constant-expression]

   */

static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  while (true)
    {
      tree expression;

      /* Look for the opening `['.  */
      cp_parser_require (parser, CPP_OPEN_SQUARE, "`['");
      /* The first expression is not required to be constant.  */
      if (!declarator)
	{
	  expression = cp_parser_expression (parser, /*cast_p=*/false);
	  /* The standard requires that the expression have integral
	     type.  DR 74 adds enumeration types.  We believe that the
	     real intent is that these expressions be handled like the
	     expression in a `switch' condition, which also allows
	     classes with a single conversion to integral or
	     enumeration type.  */
	  if (!processing_template_decl)
	    {
	      expression
		= build_expr_type_conversion (WANT_INT | WANT_ENUM,
					      expression,
					      /*complain=*/true);
	      if (!expression)
		{
		  error ("expression in new-declarator must have integral "
			 "or enumeration type");
		  expression = error_mark_node;
		}
	    }
	}
      /* But all the other expressions must be.  */
      else
	expression
	  = cp_parser_constant_expression (parser,
					   /*allow_non_constant=*/false,
					   NULL);
      /* Look for the closing `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");

      /* Add this bound to the declarator.  */
      declarator = make_array_declarator (declarator, expression);

      /* If the next token is not a `[', then there are no more
	 bounds.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
	break;
    }

  return declarator;
}

/* Parse a new-initializer.

   new-initializer:
     ( expression-list [opt] )

   Returns a representation of the expression-list.  If there is no
   expression-list, VOID_ZERO_NODE is returned.  */

static tree
cp_parser_new_initializer (cp_parser* parser)
{
  tree expression_list;

  expression_list = (cp_parser_parenthesized_expression_list
		     (parser, false, /*cast_p=*/false,
		      /*non_constant_p=*/NULL));
  if (!expression_list)
    expression_list = void_zero_node;

  return expression_list;
}

/* Parse a delete-expression.

   delete-expression:
     :: [opt] delete cast-expression
     :: [opt] delete [ ] cast-expression

   Returns a representation of the expression.  */

static tree
cp_parser_delete_expression (cp_parser* parser)
{
  bool global_scope_p;
  bool array_p;
  tree expression;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `delete' keyword.  */
  cp_parser_require_keyword (parser, RID_DELETE, "`delete'");
  /* See if the array syntax is in use.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    {
      /* Consume the `[' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the `]' token.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
      /* Remember that this is the `[]' construct.  */
      array_p = true;
    }
  else
    array_p = false;

  /* Parse the cast-expression.  */
  expression = cp_parser_simple_cast_expression (parser);

  /* A delete-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, "`delete'"))
    return error_mark_node;

  return delete_sanity (expression, NULL_TREE, array_p, global_scope_p);
}

/* Parse a cast-expression.
   cast-expression:
     unary-expression
     ( type-id ) cast-expression

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.  CAST_P is true if this expression is
   the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      bool compound_literal_p;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= "types may not be defined in casts";
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.

	 Therefore, we scan ahead to the closing `)', and check to see
	 if the token after the `)' is a `{'.  If so, we are not
	 looking at a cast-expression.

	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);
      /* Skip tokens until the next token is a closing parenthesis.
	 If we find the closing `)', and the next token is a `{', then
	 we are looking at a compound-literal.  */
      compound_literal_p
	= (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						  /*consume_paren=*/true)
	   && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));
      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we were looking at a compound-literal, simulate an error
	 so that the call to cp_parser_parse_definitely below will
	 fail.  */
      if (compound_literal_p)
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* If ok so far, parse the dependent expression.  We cannot be
	 sure it is a cast.  Consider `(T ())'.  It is a parenthesized
	 ctor of T, but looks like a cast to function returning T
	 without a dependent expression.  */
      if (!cp_parser_error_occurred (parser))
	expr = cp_parser_cast_expression (parser,
					  /*address_p=*/false,
					  /*cast_p=*/true);

      if (cp_parser_parse_definitely (parser))
	{
	  /* Warn about old-style casts, if so requested.  */
	  if (warn_old_style_cast
	      && !in_system_header
	      && !VOID_TYPE_P (type)
	      && current_lang_name != lang_name_c)
	    warning (OPT_Wold_style_cast, "use of old-style cast");

	  /* Only type conversions to integral or enumeration types
	     can be used in constant-expressions.  */
	  if (!cast_valid_in_integral_constant_expression_p (type)
	      && (cp_parser_non_integral_constant_expression
		  (parser,
		   "a cast to a type other than an integral or "
		   "enumeration type")))
	    return error_mark_node;

	  /* Perform the cast.  */
	  expr = build_c_cast (type, expr);
	  /* APPLE LOCAL begin radar 4426814 */
	  /* Under ObjC GC, wrap the result so weak references are
	     read through the collector's barrier.  */
	  return (c_dialect_objc() && flag_objc_gc)
		 /* APPLE LOCAL radar 5276085 */
		 ? objc_build_weak_reference_tree (expr) : expr;
	  /* APPLE LOCAL end radar 4426814 */
	}
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  /* APPLE LOCAL begin radar 4426814 */
  if (c_dialect_objc() && flag_objc_gc)
    /* APPLE LOCAL radar 5276085 */
    return objc_build_weak_reference_tree (
	     cp_parser_unary_expression (parser, address_p, cast_p));
  else
    return cp_parser_unary_expression (parser, address_p, cast_p);
  /* APPLE LOCAL end radar 4426814 */
}

/* Parse a binary expression of the general form:

   pm-expression:
     cast-expression
     pm-expression .* cast-expression
     pm-expression ->* cast-expression

   multiplicative-expression:
     pm-expression
     multiplicative-expression * pm-expression
     multiplicative-expression / pm-expression
     multiplicative-expression % pm-expression

   additive-expression:
     multiplicative-expression
     additive-expression + multiplicative-expression
     additive-expression - multiplicative-expression

   shift-expression:
     additive-expression
     shift-expression << additive-expression
     shift-expression >> additive-expression

   relational-expression:
     shift-expression
     relational-expression < shift-expression
     relational-expression > shift-expression
     relational-expression <= shift-expression
     relational-expression >= shift-expression

  GNU Extension:

   relational-expression:
     relational-expression <? shift-expression
     relational-expression >?
     shift-expression

   equality-expression:
     relational-expression
     equality-expression == relational-expression
     equality-expression != relational-expression

   and-expression:
     equality-expression
     and-expression & equality-expression

   exclusive-or-expression:
     and-expression
     exclusive-or-expression ^ and-expression

   inclusive-or-expression:
     exclusive-or-expression
     inclusive-or-expression | exclusive-or-expression

   logical-and-expression:
     inclusive-or-expression
     logical-and-expression && inclusive-or-expression

   logical-or-expression:
     logical-and-expression
     logical-or-expression || logical-and-expression

   All these are implemented with a single function like:

   binary-expression:
     simple-cast-expression
     binary-expression <token> binary-expression

   CAST_P is true if this expression is the target of a cast.

   The binops_by_token map is used to get the tree codes for each
   <token> type.  binary-expressions are associated according to a
   precedence table.  */

/* Map TOKEN to its precedence level.  A `>' is demoted to "not an
   operator" when the parser is inside a template-argument list
   (greater_than_is_operator_p is false), so `>' closes the list
   instead of being parsed as greater-than.  */
#define TOKEN_PRECEDENCE(token) \
  ((token->type == CPP_GREATER && !parser->greater_than_is_operator_p) \
   ? PREC_NOT_OPERATOR \
   : binops_by_token[token->type].prec)

/* Parse a binary expression using precedence climbing: an explicit
   stack of suspended left-hand sides replaces one recursive call per
   precedence level.  */
static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p)
{
  cp_parser_expression_stack stack;
  cp_parser_expression_stack_entry *sp = &stack[0];
  tree lhs, rhs;
  cp_token *token;
  enum tree_code tree_type;
  enum cp_parser_prec prec = PREC_NOT_OPERATOR, new_prec, lookahead_prec;
  bool overloaded_p;

  /* Parse the first expression.  */
  lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p);

  for (;;)
    {
      /* Get an operator token.  */
      token = cp_lexer_peek_token (parser->lexer);
      new_prec = TOKEN_PRECEDENCE (token);

      /* APPLE LOCAL begin CW asm blocks */
      /* Inside a CW asm block a token at beginning-of-line terminates
	 the expression: treat it as a non-operator.  */
      if (flag_iasm_blocks && inside_iasm_block)
	{
	  if ((token->flags & BOL) != 0)
	    new_prec = PREC_NOT_OPERATOR;
	}
      /* APPLE LOCAL end CW asm blocks */

      /* Popping an entry off the stack means we completed a subexpression:
	 - either we found a token which is not an operator (`>' where it is
	   not an operator, or prec == PREC_NOT_OPERATOR), in which case
	   popping will happen repeatedly;
	 - or, we found an operator which has lower priority.  This is the
	   case where the recursive descent *ascends*, as in `3 * 4 + 5'
	   after parsing `3 * 4'.  */
      if (new_prec <= prec)
	{
	  if (sp == stack)
	    break;
	  else
	    goto pop;
	}

     get_rhs:
      tree_type = binops_by_token[token->type].tree_type;

      /* We used the operator token.  */
      cp_lexer_consume_token (parser->lexer);

      /* Extract another operand.  It may be the RHS of this expression
	 or the LHS of a new, higher priority expression.  */
      rhs = cp_parser_simple_cast_expression (parser);

      /* Get another operator token.  Look up its precedence to avoid
	 building a useless (immediately popped) stack entry for common
	 cases such as 3 + 4 + 5 or 3 * 4 + 5.  */
      token = cp_lexer_peek_token (parser->lexer);
      lookahead_prec = TOKEN_PRECEDENCE (token);
      /* APPLE LOCAL begin CW asm blocks */
      if (flag_iasm_blocks && inside_iasm_block)
	{
	  if ((token->flags & BOL) != 0)
	    lookahead_prec = PREC_NOT_OPERATOR;
	}
      /* APPLE LOCAL end CW asm blocks */
      if (lookahead_prec > new_prec)
	{
	  /* ... and prepare to parse the RHS of the new, higher priority
	     expression.  Since precedence levels on the stack are
	     monotonically increasing, we do not have to care about stack
	     overflows.  */
	  sp->prec = prec;
	  sp->tree_type = tree_type;
	  sp->lhs = lhs;
	  sp++;
	  lhs = rhs;
	  prec = new_prec;
	  new_prec = lookahead_prec;
	  goto get_rhs;

	 pop:
	  /* If the stack is not empty, we have parsed into LHS the right side
	     (`4' in the example above) of an expression we had suspended.
	     We can use the information on the stack to recover the LHS (`3')
	     from the stack together with the tree code (`MULT_EXPR'), and
	     the precedence of the higher level subexpression
	     (`PREC_ADDITIVE_EXPRESSION').  TOKEN is the CPP_PLUS token,
	     which will be used to actually build the additive expression.  */
	  --sp;
	  prec = sp->prec;
	  tree_type = sp->tree_type;
	  rhs = lhs;
	  lhs = sp->lhs;
	}

      /* APPLE LOCAL begin CW asm blocks */
      if (inside_iasm_block && TREE_CODE (rhs) == COMPOUND_EXPR)
	{
	  /* NOTE(review): overloaded_p is passed uninitialized here;
	     presumably build_x_binary_op always stores to it before any
	     read — confirm, since its value is not consulted on this
	     path before returning.  */
	  gcc_assert (TREE_CODE (TREE_OPERAND (rhs, 1)) == IDENTIFIER_NODE);
	  lhs = build_x_binary_op (tree_type, lhs, TREE_OPERAND (rhs, 0),
				   &overloaded_p);
	  lhs = iasm_build_register_offset (lhs, TREE_OPERAND (rhs, 1));
	  return lhs;
	}
      if (inside_iasm_block)
	{
	  /* In asm blocks operands may be bare identifiers or untyped
	     trees; build a raw, untyped node and keep going.  */
	  if (TREE_CODE (rhs) == IDENTIFIER_NODE
	      || TREE_CODE (lhs) == IDENTIFIER_NODE
	      || TREE_TYPE (rhs) == NULL_TREE
	      || TREE_TYPE (lhs) == NULL_TREE)
	    {
	      lhs = build2 (tree_type, NULL_TREE, lhs, rhs);
	      continue;
	    }
	}
      /* APPLE LOCAL end CW asm blocks */

      overloaded_p = false;
      lhs = build_x_binary_op (tree_type, lhs, rhs, &overloaded_p);

      /* If the binary operator required the use of an overloaded operator,
	 then this expression cannot be an integral constant-expression.
	 An overloaded operator can be used even if both operands are
	 otherwise permissible in an integral constant-expression if at
	 least one of the operands is of enumeration type.  */
      if (overloaded_p
	  && (cp_parser_non_integral_constant_expression
	      (parser, "calls to overloaded operators")))
	return error_mark_node;
    }

  return lhs;
}

/* Parse the `? expression : assignment-expression' part of a
   conditional-expression.  The LOGICAL_OR_EXPR is the
   logical-or-expression that started the conditional-expression.
   Returns a representation of the entire conditional-expression.

   This routine is used by cp_parser_assignment_expression.

     ? expression : assignment-expression

   GNU Extensions:

     ?
       : assignment-expression  */

static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
  tree expr;
  tree assignment_expr;

  /* Consume the `?' token.  */
  cp_lexer_consume_token (parser->lexer);
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    /* Implicit true clause (GNU `x ?: y' extension).  */
    expr = NULL_TREE;
  else
    /* Parse the expression.  */
    expr = cp_parser_expression (parser, /*cast_p=*/false);

  /* The next token should be a `:'.  */
  cp_parser_require (parser, CPP_COLON, "`:'");
  /* Parse the assignment-expression.  */
  assignment_expr = cp_parser_assignment_expression (parser, /*cast_p=*/false);

  /* Build the conditional-expression.  */
  return build_x_conditional_expr (logical_or_expr,
				   expr,
				   assignment_expr);
}

/* Parse an assignment-expression.

   assignment-expression:
     conditional-expression
     logical-or-expression assignment-operator assignment_expression
     throw-expression

   CAST_P is true if this expression is the target of a cast.

   Returns a representation for the expression.  */

static tree
cp_parser_assignment_expression (cp_parser* parser, bool cast_p)
{
  tree expr;

  /* If the next token is the `throw' keyword, then we're looking at
     a throw-expression.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
    expr = cp_parser_throw_expression (parser);
  /* Otherwise, it must be that we are looking at a
     logical-or-expression.  */
  else
    {
      /* Parse the binary expressions (logical-or-expression).  */
      expr = cp_parser_binary_expression (parser, cast_p);
      /* If the next token is a `?' then we're actually looking at a
	 conditional-expression.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
	return cp_parser_question_colon_clause (parser, expr);
      else
	{
	  enum tree_code assignment_operator;

	  /* If it's an assignment-operator, we're using the second
	     production.  */
	  assignment_operator
	    = cp_parser_assignment_operator_opt (parser);
	  if (assignment_operator != ERROR_MARK)
	    {
	      tree rhs;

	      /* Parse the right-hand side of the assignment.
		 Right-associativity falls out of this recursion.  */
	      rhs = cp_parser_assignment_expression (parser, cast_p);
	      /* An assignment may not appear in a
		 constant-expression.  */
	      if (cp_parser_non_integral_constant_expression (parser,
							      "an assignment"))
		return error_mark_node;
	      /* Build the assignment expression.  */
	      expr = build_x_modify_expr (expr,
					  assignment_operator,
					  rhs);
	    }
	}
    }

  return expr;
}

/* Parse an (optional) assignment-operator.

   assignment-operator: one of
     = *= /= %= += -= >>= <<= &= ^= |=

   GNU Extension:

   assignment-operator: one of
     <?= >?=

   NOTE(review): the GNU `<?=' / `>?=' forms listed above have no
   corresponding cases in the switch below — presumably dropped along
   with the min/max operator extension; confirm and update this
   comment if so.

   If the next token is an assignment operator, the corresponding tree
   code is returned, and the token is consumed.  For example, for
   `+=', PLUS_EXPR is returned.  For `=' itself, the code returned is
   NOP_EXPR.  For `/', TRUNC_DIV_EXPR is returned; for `%',
   TRUNC_MOD_EXPR is returned.  If TOKEN is not an assignment
   operator, ERROR_MARK is returned.  */

static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  enum tree_code op;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_EQ:
      op = NOP_EXPR;
      break;

    case CPP_MULT_EQ:
      op = MULT_EXPR;
      break;

    case CPP_DIV_EQ:
      op = TRUNC_DIV_EXPR;
      break;

    case CPP_MOD_EQ:
      op = TRUNC_MOD_EXPR;
      break;

    case CPP_PLUS_EQ:
      op = PLUS_EXPR;
      break;

    case CPP_MINUS_EQ:
      op = MINUS_EXPR;
      break;

    case CPP_RSHIFT_EQ:
      op = RSHIFT_EXPR;
      break;

    case CPP_LSHIFT_EQ:
      op = LSHIFT_EXPR;
      break;

    case CPP_AND_EQ:
      op = BIT_AND_EXPR;
      break;

    case CPP_XOR_EQ:
      op = BIT_XOR_EXPR;
      break;

    case CPP_OR_EQ:
      op = BIT_IOR_EXPR;
      break;

    default:
      /* Nothing else is an assignment operator.  */
      op = ERROR_MARK;
    }

  /* If it was an assignment operator, consume it.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}

/* Parse an expression.
   expression:
     assignment-expression
     expression , assignment-expression

   CAST_P is true if this expression is the target of a cast.

   Returns a representation of the expression.  */

static tree
cp_parser_expression (cp_parser* parser, bool cast_p)
{
  tree expression = NULL_TREE;

  while (true)
    {
      tree assignment_expression;

      /* Parse the next assignment-expression.  */
      assignment_expression
	= cp_parser_assignment_expression (parser, cast_p);

      /* If this is the first assignment-expression, we can just
	 save it away.  */
      if (!expression)
	expression = assignment_expression;
      else
	expression = build_x_compound_expr (expression,
					    assignment_expression);

      /* If the next token is not a comma, then we are done with the
	 expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser,
						      "a comma operator"))
	expression = error_mark_node;
    }

  return expression;
}

/* Parse a constant-expression.

   constant-expression:
     conditional-expression

   If ALLOW_NON_CONSTANT_P a non-constant expression is silently
   accepted.  If ALLOW_NON_CONSTANT_P is true and the expression is not
   constant, *NON_CONSTANT_P is set to TRUE.  If ALLOW_NON_CONSTANT_P
   is false, NON_CONSTANT_P should be NULL.  */

static tree
cp_parser_constant_expression (cp_parser* parser,
			       bool allow_non_constant_p,
			       bool *non_constant_p)
{
  bool saved_integral_constant_expression_p;
  bool saved_allow_non_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  tree expression;

  /* It might seem that we could simply parse the
     conditional-expression, and then check to see if it were
     TREE_CONSTANT.  However, an expression that is TREE_CONSTANT is
     one that the compiler can figure out is constant, possibly after
     doing some simplifications or optimizations.  The standard has a
     precise definition of constant-expression, and we must honor
     that, even though it is somewhat more restrictive.

     For example:

       int i[(2, 3)];

     is not a legal declaration, because `(2, 3)' is not a
     constant-expression.  The `,' operator is forbidden in a
     constant-expression.  However, GCC's constant-folding machinery
     will fold this operation to an INTEGER_CST for `3'.  */

  /* Save the old settings.  These three flags form the parser's
     constant-expression state; they must all be restored together on
     exit.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_allow_non_integral_constant_expression_p
    = parser->allow_non_integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  /* We are now parsing a constant-expression.  */
  parser->integral_constant_expression_p = true;
  parser->allow_non_integral_constant_expression_p = allow_non_constant_p;
  parser->non_integral_constant_expression_p = false;
  /* Although the grammar says "conditional-expression", we parse an
     "assignment-expression", which also permits "throw-expression"
     and the use of assignment operators.  In the case that
     ALLOW_NON_CONSTANT_P is false, we get better errors than we would
     otherwise.  In the case that ALLOW_NON_CONSTANT_P is true, it is
     actually essential that we look for an assignment-expression.
     For example, cp_parser_initializer_clauses uses this function to
     determine whether a particular assignment-expression is in fact
     constant.  */
  expression = cp_parser_assignment_expression (parser, /*cast_p=*/false);
  /* Restore the old settings.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->allow_non_integral_constant_expression_p
    = saved_allow_non_integral_constant_expression_p;
  if (allow_non_constant_p)
    *non_constant_p = parser->non_integral_constant_expression_p;
  else if (parser->non_integral_constant_expression_p)
    expression = error_mark_node;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expression;
}

/* Parse __builtin_offsetof.

   offsetof-expression:
     "__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"

   offsetof-member-designator:
     id-expression
     | offsetof-member-designator "." id-expression
     | offsetof-member-designator "[" expression "]"  */

static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
  int save_ice_p, save_non_ice_p;
  tree type, expr;
  cp_id_kind dummy;

  /* We're about to accept non-integral-constant things, but will
     definitely yield an integral constant expression.  Save and
     restore these values around our local parsing.  */
  save_ice_p = parser->integral_constant_expression_p;
  save_non_ice_p = parser->non_integral_constant_expression_p;

  /* Consume the "__builtin_offsetof" token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Consume the opening `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  /* Parse the type-id.  */
  type = cp_parser_type_id (parser);
  /* Look for the `,'.  */
  cp_parser_require (parser, CPP_COMMA, "`,'");

  /* Build the (type *)null that begins the traditional offsetof macro.  */
  expr = build_static_cast (build_pointer_type (type), null_pointer_node);

  /* Parse the offsetof-member-designator.  We begin as if we saw "expr->".
  */
  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
						 true, &dummy);

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  /* offsetof-member-designator "[" expression "]" */
	  expr = cp_parser_postfix_open_square_expression (parser, expr, true);
	  break;

	case CPP_DOT:
	  /* offsetof-member-designator "." identifier */
	  cp_lexer_consume_token (parser->lexer);
	  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT, expr,
							 true, &dummy);
	  break;

	case CPP_CLOSE_PAREN:
	  /* Consume the ")" token.  */
	  cp_lexer_consume_token (parser->lexer);
	  goto success;

	default:
	  /* Error.  We know the following require will fail, but
	     that gives the proper error message.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
	  cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
	  expr = error_mark_node;
	  goto failure;
	}
    }

 success:
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
  else
    expr = finish_offsetof (expr);

 failure:
  parser->integral_constant_expression_p = save_ice_p;
  parser->non_integral_constant_expression_p = save_non_ice_p;

  return expr;
}

/* Statements [gram.stmt.stmt]  */

/* Parse a statement.

   statement:
     labeled-statement
     expression-statement
     compound-statement
     selection-statement
     iteration-statement
     jump-statement
     declaration-statement
     try-block

  IN_COMPOUND is true when the statement is nested inside a
  cp_parser_compound_statement; this matters for certain pragmas.  */

static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
		     bool in_compound)
{
  tree statement;
  cp_token *token;
  location_t statement_location;

 restart:
  /* There is no statement yet.  */
  statement = NULL_TREE;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement.  */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_CASE:
	case RID_DEFAULT:
	  /* Looks like a labeled-statement with a case label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;

	case RID_IF:
	case RID_SWITCH:
	  statement = cp_parser_selection_statement (parser);
	  break;

	case RID_WHILE:
	case RID_DO:
	case RID_FOR:
	  statement = cp_parser_iteration_statement (parser);
	  break;

	case RID_BREAK:
	case RID_CONTINUE:
	case RID_RETURN:
	case RID_GOTO:
	  statement = cp_parser_jump_statement (parser);
	  break;

	  /* Objective-C++ exception-handling constructs.  */
	case RID_AT_TRY:
	case RID_AT_CATCH:
	case RID_AT_FINALLY:
	case RID_AT_SYNCHRONIZED:
	case RID_AT_THROW:
	  statement = cp_parser_objc_statement (parser);
	  break;

	case RID_TRY:
	  statement = cp_parser_try_block (parser);
	  break;

	default:
	  /* It might be a keyword like `int' that can start a
	     declaration-statement.  */
	  break;
	}
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
	 labeled-statement.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
	{
	  /* Looks like a labeled-statement with an ordinary label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser);
	  goto restart;
	}
    }
  /* Anything that starts with a `{' must be a compound-statement.  */
  else if (token->type == CPP_OPEN_BRACE)
    /* APPLE LOCAL radar 5982990 */
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes
     a statement all its own.  */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
	 are considered statements themselves.  All others are not.  In
	 the context of a compound, accept the pragma as a "statement" and
	 return so that we can check for a close brace.  Otherwise we
	 require a real statement and must go back and read one.  */
      if (in_compound)
	cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
	goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      cp_parser_error (parser, "expected statement");
      return;
    }

  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement
     first, unless we are looking at a `;', in which case we know that
     we have an expression-statement.  */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_parse_tentatively (parser);
	  /* Try to parse the declaration-statement.  */
	  cp_parser_declaration_statement (parser);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return;
	}
      /* Look for an expression-statement instead.  */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }

  /* Set the line number for the statement.  */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);
}

/* Parse the label for a labeled-statement, i.e.

   identifier :
   case constant-expression :
   default :

   GNU Extension:
   case constant-expression ... constant-expression : statement

   When a label is parsed without errors, the label is added to the
   parse tree by the finish_* functions, so this function doesn't
   have to return the label.  */

static void
cp_parser_label_for_labeled_statement (cp_parser* parser)
{
  cp_token *token;

  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }

  switch (token->keyword)
    {
    case RID_CASE:
      {
	tree expr, expr_hi;
	cp_token *ellipsis;

	/* Consume the `case' token.
	*/
	cp_lexer_consume_token (parser->lexer);
	/* Parse the constant-expression.  */
	expr = cp_parser_constant_expression (parser,
					      /*allow_non_constant_p=*/false,
					      NULL);

	ellipsis = cp_lexer_peek_token (parser->lexer);
	if (ellipsis->type == CPP_ELLIPSIS)
	  {
	    /* Consume the `...' token (GNU case-range extension).  */
	    cp_lexer_consume_token (parser->lexer);
	    expr_hi =
	      cp_parser_constant_expression (parser,
					     /*allow_non_constant_p=*/false,
					     NULL);
	    /* We don't need to emit warnings here, as the common code
	       will do this for us.  */
	  }
	else
	  expr_hi = NULL_TREE;

	if (parser->in_switch_statement_p)
	  finish_case_label (expr, expr_hi);
	else
	  error ("case label %qE not within a switch statement", expr);
      }
      break;

    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);

      if (parser->in_switch_statement_p)
	finish_case_label (NULL_TREE, NULL_TREE);
      else
	error ("case label not within a switch statement");
      break;

    default:
      /* Anything else must be an ordinary label.  */
      finish_label_stmt (cp_parser_identifier (parser));
      break;
    }

  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, "`:'");
}

/* Parse an expression-statement.

   expression-statement:
     expression [opt] ;

   Returns the new EXPR_STMT -- or NULL_TREE if the expression
   statement consists of nothing more than an `;'.  IN_STATEMENT_EXPR_P
   indicates whether this expression-statement is part of an
   expression statement.  */

static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree statement = NULL_TREE;

  /* If the next token is a ';', then there is no expression
     statement.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    statement = cp_parser_expression (parser, /*cast_p=*/false);

  /* Consume the final `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* This is the final expression statement of a statement
       expression.  */
    statement = finish_stmt_expr_expr (statement, in_statement_expr);
  else if (statement)
    statement = finish_expr_stmt (statement);
  else
    finish_stmt ();

  return statement;
}

/* Parse a compound-statement.

   compound-statement:
     { statement-seq [opt] }

   Returns a tree representing the statement.  */

static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
			      /* APPLE LOCAL radar 5982990 */
			      bool in_try, bool objc_sjlj_exceptions)
{
  tree compound_stmt;

  /* Consume the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"))
    return error_mark_node;
  /* Begin the compound-statement.  */
  compound_stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);
  /* APPLE LOCAL begin CW asm blocks */
  /* Maybe this is the body of an asm function, which has asm lines
     following the decls.  */
  if (iasm_state >= iasm_decls)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      iasm_in_decl = true;
      /* If the leading identifier is really an opcode, force it to be
	 seen as a plain name rather than a keyword.  */
      if (token->u.value
	  && IASM_SEE_OPCODE (TYPESPEC, token->u.value) == IDENTIFIER)
	{
	  token->keyword = RID_MAX;
	  token->type = CPP_NAME;
	}
      cp_parser_iasm_declaration_seq_opt (parser);
      iasm_in_decl = false;
      iasm_state = iasm_asm;
      inside_iasm_block = true;
      iasm_kill_regs = true;
      /* LLVM LOCAL */
      iasm_label_counter = 0;
      cp_parser_iasm_line_seq_opt (parser);
      iasm_state = iasm_none;
      iasm_end_block ();
    }
  else
  /* APPLE LOCAL end CW asm blocks */
  /* Parse an (optional) statement-seq.  */
  cp_parser_statement_seq_opt (parser, in_statement_expr);
  /* APPLE LOCAL begin radar 5982990 */
  if (objc_sjlj_exceptions)
    objc_mark_locals_volatile (NULL);
  /* APPLE LOCAL end radar 5982990 */
  /* Finish the compound-statement.  */
  finish_compound_stmt (compound_stmt);
  /* Consume the `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");

  return compound_stmt;
}

/* APPLE LOCAL begin CW asm blocks */
/* Return true if the lexer's next token starts a new line.  */
static bool
cp_lexer_iasm_bol (cp_lexer* lexer)
{
  /* We can't use cp_lexer_peek_token here, as it will give errors for
     things like 1st in MS-style asm.
  */
  cp_token *token = lexer->next_token;

  return (token->flags & BOL) != 0;
}
/* APPLE LOCAL end CW asm blocks */

/* Parse an (optional) statement-seq.

   statement-seq:
     statement
     statement-seq [opt] statement  */

static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  /* APPLE LOCAL begin omit calls to empty destructors 5559195 */
  /* NOTE(review): this assumes current_function_decl is non-null and
     its DECL_CONTEXT is the enclosing class when parsing a destructor
     body — confirm callers guarantee that here.  */
  tree class_type = DECL_CONTEXT (current_function_decl);
  bool determine_destructor_triviality =
    DECL_DESTRUCTOR_P (current_function_decl)
    && class_type != NULL_TREE
    && !CLASSTYPE_DESTRUCTOR_TRIVIALITY_FINAL (class_type);
  /* Assume that the destructor is trivial at first, and mark
     nontrivial if any statement is parsed.  */
  if (determine_destructor_triviality)
    {
      CLASSTYPE_HAS_NONTRIVIAL_DESTRUCTOR_BODY (class_type) = 0;
      CLASSTYPE_DESTRUCTOR_TRIVIALITY_FINAL (class_type) = 1;
    }
  /* APPLE LOCAL end omit calls to empty destructors 5559195 */
  /* Scan statements until there aren't any more.  */
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* APPLE LOCAL begin ObjC++ 4185810 */
      /* If we're looking at a `}', then we've run out of statements;
	 the same is true if we have reached the end of file, or have
	 stumbled upon a stray 'else' or '@end'.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL
	  || (token->type == CPP_KEYWORD
	      && (token->keyword == RID_ELSE
		  || token->keyword == RID_AT_END)))
      /* APPLE LOCAL end ObjC++ 4185810 */
	break;

      /* APPLE LOCAL begin omit calls to empty destructors 5559195 */
      /* Any statement at all makes the destructor body nontrivial.  */
      if (determine_destructor_triviality)
	CLASSTYPE_HAS_NONTRIVIAL_DESTRUCTOR_BODY (class_type) = 1;
      /* APPLE LOCAL end omit calls to empty destructors 5559195 */
      /* Parse the statement.  */
      cp_parser_statement (parser, in_statement_expr, true);
      /* APPLE LOCAL begin CW asm blocks */
      /* In an asm function, a beginning-of-line token or a bare name
	 means the asm lines have started; stop scanning statements.  */
      if (flag_iasm_blocks
	  && iasm_state >= iasm_decls
	  && (cp_lexer_iasm_bol (parser->lexer)
	      || cp_lexer_next_token_is (parser->lexer, CPP_NAME)))
	break;
      /* APPLE LOCAL end CW asm blocks */
    }
}

/* Parse a selection-statement.

   selection-statement:
     if ( condition ) statement
     if ( condition ) statement else statement
     switch ( condition ) statement

   Returns the new IF_STMT or SWITCH_STMT.  */

static tree
cp_parser_selection_statement (cp_parser* parser)
{
  cp_token *token;
  enum rid keyword;

  /* Peek at the next token.  Callers only reach here when the next
     token is `if' or `switch' (see cp_parser_statement), so the
     require cannot fail — presumably why TOKEN is not null-checked
     before the dereference below.  */
  token = cp_parser_require (parser, CPP_KEYWORD, "selection-statement");

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
	tree statement;
	tree condition;

	/* Look for the `('.  */
	if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
	  {
	    cp_parser_skip_to_end_of_statement (parser);
	    return error_mark_node;
	  }

	/* Begin the selection-statement.  */
	if (keyword == RID_IF)
	  statement = begin_if_stmt ();
	else
	  statement = begin_switch_stmt ();

	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	/* Look for the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
	  cp_parser_skip_to_closing_parenthesis (parser, true, false,
						 /*consume_paren=*/true);

	if (keyword == RID_IF)
	  {
	    /* Add the condition.  */
	    finish_if_stmt_cond (condition, statement);

	    /* Parse the then-clause.  */
	    cp_parser_implicitly_scoped_statement (parser);
	    finish_then_clause (statement);

	    /* If the next token is `else', parse the else-clause.  */
	    if (cp_lexer_next_token_is_keyword (parser->lexer,
						RID_ELSE))
	      {
		/* Consume the `else' keyword.  */
		cp_lexer_consume_token (parser->lexer);
		begin_else_clause (statement);
		/* Parse the else-clause.  */
		cp_parser_implicitly_scoped_statement (parser);
		finish_else_clause (statement);
	      }

	    /* Now we're all done with the if-statement.
	    */
	    finish_if_stmt (statement);
	  }
	else
	  {
	    bool in_switch_statement_p;
	    unsigned char in_statement;

	    /* Add the condition.  */
	    finish_switch_cond (condition, statement);

	    /* Parse the body of the switch-statement.  Save and
	       restore the in-switch state so nested statements see
	       the right context for `case'/`break'.  */
	    in_switch_statement_p = parser->in_switch_statement_p;
	    in_statement = parser->in_statement;
	    parser->in_switch_statement_p = true;
	    parser->in_statement |= IN_SWITCH_STMT;
	    cp_parser_implicitly_scoped_statement (parser);
	    parser->in_switch_statement_p = in_switch_statement_p;
	    parser->in_statement = in_statement;

	    /* Now we're all done with the switch-statement.  */
	    finish_switch_stmt (statement);
	  }

	return statement;
      }
      break;

    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}

/* Parse a condition.

   condition:
     expression
     type-specifier-seq declarator = assignment-expression

   GNU Extension:

   condition:
     type-specifier-seq declarator asm-specification [opt]
       attributes [opt] = assignment-expression

   Returns the expression that should be tested.  */

static tree
cp_parser_condition (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  const char *saved_message;

  /* Try the declaration first.  */
  cp_parser_parse_tentatively (parser);
  /* New types are not allowed in the type-specifier-seq for a
     condition.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = "types may not be defined in conditions";
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition==*/true,
				&type_specifiers);
  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* If all is well, we might be looking at a declaration.  */
  if (!cp_parser_error_occurred (parser))
    {
      tree decl;
      tree asm_specification;
      tree attributes;
      cp_declarator *declarator;
      tree initializer = NULL_TREE;

      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 /*parenthesized_p=*/NULL,
					 /*member_p=*/false);
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
      /* Parse the asm-specification.  */
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* If the next token is not an `=', then we might still be
	 looking at an expression.  For example:

	   if (A(a).x)

	 looks like a decl-specifier-seq and a declarator -- but then
	 there is no `=', so this is an expression.  */
      cp_parser_require (parser, CPP_EQ, "`='");
      /* If we did see an `=', then we are looking at a declaration
	 for sure.  */
      if (cp_parser_parse_definitely (parser))
	{
	  tree pushed_scope;
	  bool non_constant_p;

	  /* Create the declaration.  */
	  decl = start_decl (declarator, &type_specifiers,
			     /*initialized_p=*/true,
			     attributes, /*prefix_attributes=*/NULL_TREE,
			     &pushed_scope);
	  /* Parse the assignment-expression.  */
	  initializer = cp_parser_constant_expression (parser,
						       /*allow_non_constant_p=*/true,
						       &non_constant_p);
	  if (!non_constant_p)
	    initializer = fold_non_dependent_expr (initializer);

	  /* Process the initializer.  */
	  cp_finish_decl (decl,
			  initializer, !non_constant_p,
			  asm_specification,
			  LOOKUP_ONLYCONVERTING);

	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  return convert_from_reference (decl);
	}
    }
  /* If we didn't even get past the declarator successfully, we are
     definitely not looking at a declaration.  */
  else
    cp_parser_abort_tentative_parse (parser);

  /* Otherwise, we are looking at an expression.  */
  return cp_parser_expression (parser, /*cast_p=*/false);
}

/* APPLE LOCAL begin radar 4631818 */
/* This routine looks for objective-c++'s foreach statement by
   scanning for-loop header looking for either
   1) 'for (type selector in...)' or
   2) 'for (selector in...)' where selector is already declared in
      outer scope.
   If it failed, it undoes the lexical look-ahead and returns false.
   If it succeeded, it adds the 'selector' to the statement list and
   returns true.
   At success, lexer points to token following the 'in' keyword.  */

static bool
cp_parser_parse_foreach_stmt (cp_parser *parser)
{
  int decl_spec_declares_class_or_enum;
  bool is_cv_qualifier;
  tree type_spec;
  cp_decl_specifier_seq decl_specs;
  tree node;
  cp_token *token;
  bool is_legit_foreach = false;
  cp_declarator *declarator;

  /* Exclude class/struct/enum type definition in for-loop header,
     which is apparently legal in c++.  Otherwise, it causes
     side-effect (type is entered in function's scope) when type is
     re-parsed.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_parser_token_is_class_key (token) || token->keyword == RID_ENUM)
    return false;
  cp_parser_parse_tentatively (parser);
  clear_decl_specs (&decl_specs);
  /* Tentatively parse "type declarator" so it can be undone if this
     turns out not to be a foreach header.  */
  type_spec
    = cp_parser_type_specifier (parser, CP_PARSER_FLAGS_OPTIONAL,
				&decl_specs,
				/*is_declaration=*/true,
				&decl_spec_declares_class_or_enum,
				&is_cv_qualifier);
  declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
				     NULL,
				     /*parenthesized_p=*/NULL,
				     /*member_p=*/false);
  if (declarator == cp_error_declarator)
    {
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  token = cp_lexer_peek_token (parser->lexer);
  node = token->u.value;

  /* The declarator must be followed by the contextual keyword `in',
     and `in' by a token that can begin a collection expression.  */
  if (node && TREE_CODE (node) == IDENTIFIER_NODE
      && node == ridpointers [(int) RID_IN])
    {
      enum cpp_ttype nt = cp_lexer_peek_nth_token (parser->lexer, 2)->type;
      switch (nt)
	{
	case CPP_NAME:
	case CPP_OPEN_PAREN:
	case CPP_MULT:
	case CPP_PLUS:
	case CPP_PLUS_PLUS:
	case CPP_MINUS:
	case CPP_MINUS_MINUS:
	case CPP_OPEN_SQUARE:
	  is_legit_foreach = true;
	default:
	  break;
	}
    }
  if (is_legit_foreach)
    {
      tree pushed_scope = NULL;
      tree decl;
      if (type_spec)
	{
	  /* we have: 'for (type selector in...)' */
	  cp_parser_commit_to_tentative_parse (parser);
	  decl = start_decl (declarator, &decl_specs,
			     false /*is_initialized*/,
			     NULL_TREE /*attributes*/,
			     NULL_TREE /*prefix_attributes*/,
			     &pushed_scope);
	  /* APPLE LOCAL begin radar 5130983 */
	  if (!decl || decl == error_mark_node)
	    {
	      error ("selector is undeclared");
	      is_legit_foreach = false;
	    }
	  else
	    cp_finish_decl (decl,
			    NULL_TREE /*initializer*/,
			    false /*init_const_expr_p=*/,
			    NULL_TREE /*asm_specification*/,
			    0 /*flags */);
	}
      else
	{
	  tree statement;
	  /* we have: 'for (selector in...)' */
	  /* Parse it as an expression.  */
	  cp_parser_abort_tentative_parse (parser);
	  statement = cp_parser_expression (parser, /*cast_p=*/false);
	  add_stmt (statement);
	}
      /* APPLE LOCAL end radar 5130983 */
      /* Consume the 'in' token */
      cp_lexer_consume_token (parser->lexer);
    }
  else
    cp_parser_abort_tentative_parse (parser);
  return is_legit_foreach;
}
/* APPLE LOCAL end radar 4631818 */

/* APPLE LOCAL begin mainline */
/* We check for a ) immediately followed by ; with no whitespace
   between.  This is used to issue a warning for:

     while (...);
   and:
     for (...);

   as the semicolon is probably extraneous.

   On parse errors, the next token might not be a ), so do nothing in
   that case.  */

static void
check_empty_body (cp_parser* parser, const char* type)
{
  cp_token *token;
  cp_token *close_paren;
  expanded_location close_loc;
  expanded_location semi_loc;

  close_paren = cp_lexer_peek_token (parser->lexer);
  if (close_paren->type != CPP_CLOSE_PAREN)
    return;
  close_loc = expand_location (close_paren->location);
  token = cp_lexer_peek_nth_token (parser->lexer, 2);

  /* PREV_WHITE set means whitespace preceded the `;', so it was
     probably intentional; don't warn.  */
  if (token->type != CPP_SEMICOLON
      || (token->flags & PREV_WHITE))
    return;

  semi_loc = expand_location (token->location);
  /* Only warn when `)' and `;' are adjacent; column info is only
     available with mapped locations.  */
  if (close_loc.line == semi_loc.line
#ifdef USE_MAPPED_LOCATION
      && close_loc.column+1 == semi_loc.column
#endif
      )
    warning (OPT_Wempty_body,
	     "suggest a space before %<;%> or explicit braces around empty "
	     "body in %<%s%> statement",
	     type);
}
/* APPLE LOCAL end mainline */

/* Parse an iteration-statement.
iteration-statement: while ( condition ) statement do statement while ( expression ) ; for ( for-init-statement condition [opt] ; expression [opt] ) statement APPLE LOCAL begin for-fsf-4_4 3274130 5295549 GNU extension: while attributes [opt] ( condition ) statement do attributes [opt] statement while ( expression ) ; for attributes [opt] ( for-init-statement condition [opt] ; expression [opt] ) statement APPLE LOCAL end for-fsf-4_4 3274130 5295549 Returns the new WHILE_STMT, DO_STMT, or FOR_STMT. */ static tree cp_parser_iteration_statement (cp_parser* parser) { cp_token *token; enum rid keyword; /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ tree statement, attributes; /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ unsigned char in_statement; /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ /* Get the keyword at the start of the loop. */ /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ token = cp_parser_require (parser, CPP_KEYWORD, "iteration-statement"); if (!token) return error_mark_node; /* Remember whether or not we are already within an iteration statement. */ in_statement = parser->in_statement; /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ /* Parse the attributes, if any. */ attributes = cp_parser_attributes_opt (parser); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ /* See what kind of keyword it is. */ keyword = token->keyword; switch (keyword) { case RID_WHILE: { tree condition; /* Begin the while-statement. */ /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ statement = begin_while_stmt (attributes); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); /* Parse the condition. */ condition = cp_parser_condition (parser); finish_while_stmt_cond (condition, statement); /* APPLE LOCAL mainline */ check_empty_body (parser, "while"); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); /* Parse the dependent statement. 
*/ parser->in_statement = IN_ITERATION_STMT; cp_parser_already_scoped_statement (parser); parser->in_statement = in_statement; /* We're done with the while-statement. */ finish_while_stmt (statement); } break; case RID_DO: { tree expression; /* Begin the do-statement. */ /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ statement = begin_do_stmt (attributes); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ /* Parse the body of the do-statement. */ parser->in_statement = IN_ITERATION_STMT; cp_parser_implicitly_scoped_statement (parser); parser->in_statement = in_statement; finish_do_body (statement); /* Look for the `while' keyword. */ cp_parser_require_keyword (parser, RID_WHILE, "`while'"); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); /* Parse the expression. */ expression = cp_parser_expression (parser, /*cast_p=*/false); /* We're done with the do-statement. */ finish_do_stmt (expression, statement); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); /* Look for the `;'. */ cp_parser_require (parser, CPP_SEMICOLON, "`;'"); } break; case RID_FOR: { tree condition = NULL_TREE; tree expression = NULL_TREE; /* Begin the for-statement. */ /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */ \ statement = begin_for_stmt (attributes); /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */ \ /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); /* APPLE LOCAL begin radar 4631818 */ if (c_dialect_objc () && cp_parser_parse_foreach_stmt (parser)) { objc_foreach_stmt (parser, statement); break; } /* APPLE LOCAL end radar 4631818 */ /* Parse the initialization. */ cp_parser_for_init_statement (parser); finish_for_init_stmt (statement); /* If there's a condition, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) condition = cp_parser_condition (parser); finish_for_cond (condition, statement); /* Look for the `;'. 
*/ cp_parser_require (parser, CPP_SEMICOLON, "`;'"); /* If there's an expression, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) expression = cp_parser_expression (parser, /*cast_p=*/false); finish_for_expr (expression, statement); /* APPLE LOCAL mainline */ check_empty_body (parser, "for"); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); /* Parse the body of the for-statement. */ parser->in_statement = IN_ITERATION_STMT; cp_parser_already_scoped_statement (parser); parser->in_statement = in_statement; /* We're done with the for-statement. */ finish_for_stmt (statement); } break; default: cp_parser_error (parser, "expected iteration-statement"); statement = error_mark_node; break; } return statement; } /* Parse a for-init-statement. for-init-statement: expression-statement simple-declaration */ static void cp_parser_for_init_statement (cp_parser* parser) { /* If the next token is a `;', then we have an empty expression-statement. Grammatically, this is also a simple-declaration, but an invalid one, because it does not declare anything. Therefore, if we did not handle this case specially, we would issue an error message about an invalid declaration. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { /* We're going to speculatively look for a declaration, falling back to an expression, if necessary. */ cp_parser_parse_tentatively (parser); /* Parse the declaration. */ cp_parser_simple_declaration (parser, /*function_definition_allowed_p=*/false); /* If the tentative parse failed, then we shall need to look for an expression-statement. */ if (cp_parser_parse_definitely (parser)) return; } cp_parser_expression_statement (parser, false); } /* Parse a jump-statement. jump-statement: break ; continue ; return expression [opt] ; goto identifier ; GNU extension: jump-statement: goto * expression ; Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or GOTO_EXPR. 
*/

static tree
cp_parser_jump_statement (cp_parser* parser)
{
  tree statement = error_mark_node;
  cp_token *token;
  enum rid keyword;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, "jump-statement");
  if (!token)
    return error_mark_node;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_BREAK:
      /* Note: the IN_OMP_* cases are listed after `default'; exact
	 case matches still win, so `default' only sees genuine
	 loop/switch contexts.  */
      switch (parser->in_statement)
	{
	case 0:
	  error ("break statement not within loop or switch");
	  break;
	default:
	  gcc_assert ((parser->in_statement & IN_SWITCH_STMT)
		      || parser->in_statement == IN_ITERATION_STMT);
	  statement = finish_break_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error ("invalid exit from OpenMP structured block");
	  break;
	case IN_OMP_FOR:
	  error ("break statement used with OpenMP for loop");
	  break;
	}
      cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
      break;

    case RID_CONTINUE:
      /* A `continue' inside a switch refers to the enclosing loop, so
	 mask the switch bit out before dispatching.  */
      switch (parser->in_statement & ~IN_SWITCH_STMT)
	{
	case 0:
	  error ("continue statement not within a loop");
	  break;
	case IN_ITERATION_STMT:
	case IN_OMP_FOR:
	  statement = finish_continue_stmt ();
	  break;
	case IN_OMP_BLOCK:
	  error ("invalid exit from OpenMP structured block");
	  break;
	default:
	  gcc_unreachable ();
	}
      cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
      break;

    case RID_RETURN:
      {
	tree expr;

	/* If the next token is a `;', then there is no
	   expression.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	  expr = cp_parser_expression (parser, /*cast_p=*/false);
	else
	  expr = NULL_TREE;
	/* Build the return-statement.  */
	statement = finish_return_stmt (expr);
	/* Look for the final `;'.  */
	cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
      }
      break;

    case RID_GOTO:
      /* APPLE LOCAL begin blocks 6040305 (cb) */
      if (cur_block)
	error ("goto not allowed in block literal");
      /* APPLE LOCAL end blocks 6040305 (cb) */
      /* Create the goto-statement.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
	{
	  /* Issue a warning about this use of a GNU extension.  */
	  if (pedantic)
	    pedwarn ("ISO C++ forbids computed gotos");
	  /* Consume the '*' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the dependent expression.  */
	  finish_goto_stmt (cp_parser_expression (parser, /*cast_p=*/false));
	}
      else
	finish_goto_stmt (cp_parser_identifier (parser));
      /* Look for the final `;'.  */
      cp_parser_require (parser, CPP_SEMICOLON, "%<;%>");
      break;

    default:
      cp_parser_error (parser, "expected jump-statement");
      break;
    }

  return statement;
}

/* Parse a declaration-statement.

   declaration-statement:
     block-declaration  */

static void
cp_parser_declaration_statement (cp_parser* parser)
{
  void *p;

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* Parse the block-declaration.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);

  /* Finish off the statement.  */
  finish_stmt ();
}

/* Some dependent statements (like `if (cond) statement'), are
   implicitly in their own scope.  In other words, if the statement is
   a single statement (as opposed to a compound-statement), it is
   none-the-less treated as if it were enclosed in braces.  Any
   declarations appearing in the dependent statement are out of scope
   after control passes that point.  This function parses a statement,
   but ensures that is in its own scope, even if it is not a
   compound-statement.

   Returns the new statement.  */

static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser)
{
  tree statement;

  /* Mark if () ; with a special NOP_EXPR.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      cp_lexer_consume_token (parser->lexer);
      statement = add_stmt (build_empty_stmt ());
    }
  /* if a compound is opened, we simply parse the statement directly.  */
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    /* APPLE LOCAL radar 5982990 */
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* If the token is not a `{', then we must take special action.  */
  else
    {
      /* Create a compound-statement.  */
      statement = begin_compound_stmt (0);
      /* Parse the dependent-statement.  */
      cp_parser_statement (parser, NULL_TREE, false);
      /* Finish the dummy compound-statement.  */
      finish_compound_stmt (statement);
    }

  /* Return the statement.  */
  return statement;
}

/* For some dependent statements (like `while (cond) statement'), we
   have already created a scope.  Therefore, even if the dependent
   statement is a compound-statement, we do not want to create another
   scope.  */

static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  /* If the token is a `{', then we must take special action.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    cp_parser_statement (parser, NULL_TREE, false);
  else
    {
      /* Avoid calling cp_parser_compound_statement, so that we
	 don't create a new scope.  Do everything else by hand.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
    }
}

/* Declarations [gram.dcl.dcl] */

/* Parse an optional declaration-sequence.

   declaration-seq:
     declaration
     declaration-seq declaration  */

static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL)
	break;

      if (token->type == CPP_SEMICOLON)
	{
	  /* A declaration consisting of a single semicolon is
	     invalid.  Allow it unless we're being pedantic.  */
	  cp_lexer_consume_token (parser->lexer);
	  if (pedantic && !in_system_header)
	    pedwarn ("extra %<;%>");
	  continue;
	}

      /* If we're entering or exiting a region that's implicitly
	 extern "C", modify the lang context appropriately.  */
      if (!parser->implicit_extern_c && token->implicit_extern_c)
	{
	  push_lang_context (lang_name_c);
	  parser->implicit_extern_c = true;
	}
      else if (parser->implicit_extern_c && !token->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}

      if (token->type == CPP_PRAGMA)
	{
	  /* A top-level declaration can consist solely of a #pragma.
	     A nested declaration cannot, so this is done here and not
	     in cp_parser_declaration.  (A #pragma at block scope is
	     handled in cp_parser_statement.)  */
	  cp_parser_pragma (parser, pragma_external);
	  continue;
	}

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}

/* APPLE LOCAL begin radar 4548636 */
/* NOTE(review): the function name misspells "followed"; it is kept
   as-is because callers use this spelling.  */
static bool
/* This routine is called when lexer has seen an '__attribute__' token. It
   does look-ahead to see of __attribute__ list declaration is followed by
   an objective-c at_keyword. If so, it returns true. This is to
   disambiguate use of attribute before types and before objective-c's
   @interface declaration. */
objc_attr_follwed_by_at_keyword (cp_parser* parser)
{
  cp_token token1;
  tree attributes = NULL_TREE;
  /* Save the token stream so the look-ahead can be undone below.  */
  cp_lexer_save_tokens (parser->lexer);
  cp_parser_objc_maybe_attributes (parser, &attributes);
  gcc_assert (attributes);
  token1 = *cp_lexer_peek_token (parser->lexer);
  cp_lexer_rollback_tokens (parser->lexer);
  return OBJC_IS_AT_KEYWORD (token1.keyword);
}
/* APPLE LOCAL end radar 4548636 */

/* Parse a declaration.
   declaration:
     block-declaration
     function-definition
     template-declaration
     explicit-instantiation
     explicit-specialization
     linkage-specification
     namespace-definition

   GNU extension:

   declaration:
      __extension__ declaration */

static void
cp_parser_declaration (cp_parser* parser)
{
  cp_token token1;
  cp_token token2;
  int saved_pedantic;
  void *p;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_declaration (parser);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Try to figure out what kind of declaration is present.  */
  token1 = *cp_lexer_peek_token (parser->lexer);

  if (token1.type != CPP_EOF)
    token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
  else
    {
      /* Synthesize an EOF second token so the dispatch below need not
	 special-case end-of-file.  */
      token2.type = CPP_EOF;
      token2.keyword = RID_MAX;
    }

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token1.keyword == RID_EXTERN
      && cp_parser_is_string_literal (&token2))
    cp_parser_linkage_specification (parser);
  /* If the next token is `template', then we have either a template
     declaration, an explicit instantiation, or an explicit
     specialization.  */
  else if (token1.keyword == RID_TEMPLATE)
    {
      /* `template <>' indicates a template specialization.  */
      if (token2.type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
	cp_parser_explicit_specialization (parser);
      /* `template <' indicates a template declaration.  */
      else if (token2.type == CPP_LESS)
	cp_parser_template_declaration (parser, /*member_p=*/false);
      /* Anything else must be an explicit instantiation.  */
      else
	cp_parser_explicit_instantiation (parser);
    }
  /* If the next token is `export', then we have a template
     declaration.  */
  else if (token1.keyword == RID_EXPORT)
    cp_parser_template_declaration (parser, /*member_p=*/false);
  /* If the next token is `extern', 'static' or 'inline' and the one
     after that is `template', we have a GNU extended explicit
     instantiation directive.  */
  else if (cp_parser_allow_gnu_extensions_p (parser)
	   && (token1.keyword == RID_EXTERN
	       || token1.keyword == RID_STATIC
	       || token1.keyword == RID_INLINE)
	   && token2.keyword == RID_TEMPLATE)
    cp_parser_explicit_instantiation (parser);
  /* If the next token is `namespace', check for a named or unnamed
     namespace definition.  */
  else if (token1.keyword == RID_NAMESPACE
	   && (/* A named namespace definition.  */
	       (token2.type == CPP_NAME
		&& (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		    != CPP_EQ))
	       /* An unnamed namespace definition.  */
	       || token2.type == CPP_OPEN_BRACE
	       || token2.keyword == RID_ATTRIBUTE))
    cp_parser_namespace_definition (parser);
  /* Objective-C++ declaration/definition.  */
  /* APPLE LOCAL begin radar 4548636 */
  else if (c_dialect_objc ()
	   && (OBJC_IS_AT_KEYWORD (token1.keyword)
	       || (token1.keyword == RID_ATTRIBUTE
		   && objc_attr_follwed_by_at_keyword (parser))))
  /* APPLE LOCAL end radar 4548636 */
    cp_parser_objc_declaration (parser);
  /* We must have either a block declaration or a function
     definition.  */
  else
    /* Try to parse a block-declaration, or a function-definition.  */
    cp_parser_block_declaration (parser, /*statement_p=*/false);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);
}

/* Parse a block-declaration.

   block-declaration:
     simple-declaration
     asm-definition
     namespace-alias-definition
     using-declaration
     using-directive

   GNU Extension:

   block-declaration:
     __extension__ block-declaration
     label-declaration

   If STATEMENT_P is TRUE, then this block-declaration is occurring as
   part of a declaration-statement.  */

static void
cp_parser_block_declaration (cp_parser *parser,
			     bool statement_p)
{
  cp_token *token1;
  int saved_pedantic;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_block_declaration (parser, statement_p);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Peek at the next token to figure out which kind of declaration is
     present.  */
  token1 = cp_lexer_peek_token (parser->lexer);

  /* If the next keyword is `asm', we have an asm-definition.  */
  if (token1->keyword == RID_ASM)
    {
      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      /* APPLE LOCAL CW asm blocks */
      cp_parser_asm_definition (parser, statement_p);
    }
  /* If the next keyword is `namespace', we have a
     namespace-alias-definition.  */
  else if (token1->keyword == RID_NAMESPACE)
    cp_parser_namespace_alias_definition (parser);
  /* If the next keyword is `using', we have either a
     using-declaration or a using-directive.  */
  else if (token1->keyword == RID_USING)
    {
      cp_token *token2;

      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      /* If the token after `using' is `namespace', then we have a
	 using-directive.  */
      token2 = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token2->keyword == RID_NAMESPACE)
	cp_parser_using_directive (parser);
      /* Otherwise, it's a using-declaration.  */
      else
	cp_parser_using_declaration (parser,
				     /*access_declaration_p=*/false);
    }
  /* If the next keyword is `__label__' we have a label declaration.  */
  else if (token1->keyword == RID_LABEL)
    {
      if (statement_p)
	cp_parser_commit_to_tentative_parse (parser);
      cp_parser_label_declaration (parser);
    }
  /* Anything else must be a simple-declaration.  */
  else
    /* !STATEMENT_P here: a function-definition is only permitted when
       this is not a declaration-statement.  */
    cp_parser_simple_declaration (parser, !statement_p);
}

/* Parse a simple-declaration.

   simple-declaration:
     decl-specifier-seq [opt] init-declarator-list [opt] ;

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
   function-definition as a simple-declaration.
*/

static void
cp_parser_simple_declaration (cp_parser* parser,
			      bool function_definition_allowed_p)
{
  cp_decl_specifier_seq decl_specifiers;
  int declares_class_or_enum;
  bool saw_declarator;

  /* Defer access checks until we know what is being declared; the
     checks for names appearing in the decl-specifier-seq should be
     done as if we were in the scope of the thing being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Parse the decl-specifier-seq.  We have to keep track of whether
     or not the decl-specifier-seq declares a named class or
     enumeration type, since that is the only case in which the
     init-declarator-list is allowed to be empty.

     [dcl.dcl]

     In a simple-declaration, the optional init-declarator-list can be
     omitted only when declaring a class or enumeration, that is when
     the decl-specifier-seq contains either a class-specifier, an
     elaborated-type-specifier, or an enum-specifier.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* We no longer need to defer access checks.  */
  stop_deferring_access_checks ();

  /* In a block scope, a valid declaration must always have a
     decl-specifier-seq.  By not trying to parse declarators, we can
     resolve the declaration/expression ambiguity more quickly.  */
  if (!function_definition_allowed_p
      && !decl_specifiers.any_specifiers_p)
    {
      /* APPLE LOCAL begin CW asm blocks */
      /* We might have seen an asm opcode, and it's time to switch to
	 asm instruction handling.  */
      if (flag_iasm_blocks
	  && iasm_state >= iasm_decls)
	return;
      /* APPLE LOCAL end CW asm blocks */
      cp_parser_error (parser, "expected declaration");
      goto done;
    }

  /* If the next two tokens are both identifiers, the code is
     erroneous. The usual cause of this situation is code like:

	T t;

     where "T" should name a type -- but does not.  */
  if (!decl_specifiers.type
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* If parsing tentatively, we should commit; we really are
	 looking at a declaration.  */
      cp_parser_commit_to_tentative_parse (parser);
      /* Give up.  */
      goto done;
    }

  /* If we have seen at least one decl-specifier, and the next token
     is not a parenthesis, then we must be looking at a declaration.
     (After "int (" we might be looking at a functional cast.)  */
  if (decl_specifiers.any_specifiers_p
      && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
    cp_parser_commit_to_tentative_parse (parser);

  /* Keep going until we hit the `;' at the end of the simple
     declaration.  */
  saw_declarator = false;
  while (cp_lexer_next_token_is_not (parser->lexer,
				     CPP_SEMICOLON))
    {
      cp_token *token;
      bool function_definition_p;
      tree decl;

      if (saw_declarator)
	{
	  /* If we are processing next declarator, comma is expected */
	  token = cp_lexer_peek_token (parser->lexer);
	  gcc_assert (token->type == CPP_COMMA);
	  cp_lexer_consume_token (parser->lexer);
	}
      else
	saw_declarator = true;

      /* Parse the init-declarator.  */
      decl = cp_parser_init_declarator (parser, &decl_specifiers,
					/*checks=*/NULL,
					function_definition_allowed_p,
					/*member_p=*/false,
					declares_class_or_enum,
					&function_definition_p);
      /* If an error occurred while parsing tentatively, exit quickly.
	 (That usually happens when in the body of a function; each
	 statement is treated as a declaration-statement until proven
	 otherwise.)  */
      if (cp_parser_error_occurred (parser))
	goto done;
      /* Handle function definitions specially.  */
      if (function_definition_p)
	{
	  /* If the next token is a `,', then we are probably
	     processing something like:

	       void f() {}, *p;

	     which is erroneous.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    error ("mixing declarations and function-definitions is forbidden");
	  /* Otherwise, we're done with the list of declarators.  */
	  else
	    {
	      pop_deferring_access_checks ();
	      return;
	    }
	}
      /* The next token should be either a `,' or a `;'.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `,', there are more declarators to come.  */
      if (token->type == CPP_COMMA)
	/* will be consumed next time around */;
      /* If it's a `;', we are done.  */
      else if (token->type == CPP_SEMICOLON)
	break;
      /* Anything else is an error.  */
      else
	{
	  /* If we have already issued an error message we don't need
	     to issue another one.  */
	  if (decl != error_mark_node
	      || cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_error (parser, "expected %<,%> or %<;%>");
	  /* Skip tokens until we reach the end of the statement.  */
	  cp_parser_skip_to_end_of_statement (parser);
	  /* If the next token is now a `;', consume it.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);
	  goto done;
	}
      /* After the first time around, a function-definition is not
	 allowed -- even if it was OK at first.  For example:

	   int i, f() {}

	 is not valid.  */
      function_definition_allowed_p = false;
    }

  /* Issue an error message if no declarators are present, and the
     decl-specifier-seq does not itself declare a class or
     enumeration.  */
  if (!saw_declarator)
    {
      if (cp_parser_declares_only_class_p (parser))
	shadow_tag (&decl_specifiers);
      /* Perform any deferred access checks.  */
      perform_deferred_access_checks ();
    }

  /* Consume the `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
  /* APPLE LOCAL begin CW asm blocks */
  if (flag_iasm_blocks)
    iasm_in_decl = false;
  /* APPLE LOCAL end CW asm blocks */

  /* Shared exit path: balances the push_deferring_access_checks at
     function entry.  */
 done:
  pop_deferring_access_checks ();
}

/* Parse a decl-specifier-seq.

   decl-specifier-seq:
     decl-specifier-seq [opt] decl-specifier

   decl-specifier:
     storage-class-specifier
     type-specifier
     function-specifier
     friend
     typedef

   GNU Extension:

   decl-specifier:
     attributes

   Set *DECL_SPECS to a representation of the decl-specifier-seq.

   The parser flags FLAGS is used to control type-specifier parsing.
*DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following flags: 1: one of the decl-specifiers is an elaborated-type-specifier (i.e., a type declaration) 2: one of the decl-specifiers is an enum-specifier or a class-specifier (i.e., a type definition) */ static void cp_parser_decl_specifier_seq (cp_parser* parser, cp_parser_flags flags, cp_decl_specifier_seq *decl_specs, int* declares_class_or_enum) { bool constructor_possible_p = !parser->in_declarator_p; /* Clear DECL_SPECS. */ clear_decl_specs (decl_specs); /* Assume no class or enumeration type is declared. */ *declares_class_or_enum = 0; /* Keep reading specifiers until there are no more to read. */ while (true) { bool constructor_p; bool found_decl_spec; cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Handle attributes. */ if (token->keyword == RID_ATTRIBUTE) { /* Parse the attributes. */ decl_specs->attributes = chainon (decl_specs->attributes, cp_parser_attributes_opt (parser)); continue; } /* Assume we will find a decl-specifier keyword. */ found_decl_spec = true; /* If the next token is an appropriate keyword, we can simply add it to the list. */ switch (token->keyword) { /* decl-specifier: friend */ case RID_FRIEND: if (!at_class_scope_p ()) { error ("%<friend%> used outside of class"); cp_lexer_purge_token (parser->lexer); } else { ++decl_specs->specs[(int) ds_friend]; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } break; /* function-specifier: inline virtual explicit */ case RID_INLINE: case RID_VIRTUAL: case RID_EXPLICIT: cp_parser_function_specifier_opt (parser, decl_specs); break; /* decl-specifier: typedef */ case RID_TYPEDEF: ++decl_specs->specs[(int) ds_typedef]; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); /* A constructor declarator cannot appear in a typedef. */ constructor_possible_p = false; /* The "typedef" keyword can only occur in a declaration; we may as well commit at this point. 
*/ cp_parser_commit_to_tentative_parse (parser); if (decl_specs->storage_class != sc_none) decl_specs->conflicting_specifiers_p = true; break; /* storage-class-specifier: auto register static extern mutable GNU Extension: thread */ case RID_AUTO: case RID_REGISTER: case RID_STATIC: case RID_EXTERN: case RID_MUTABLE: /* Consume the token. */ cp_lexer_consume_token (parser->lexer); cp_parser_set_storage_class (parser, decl_specs, token->keyword); break; case RID_THREAD: /* Consume the token. */ cp_lexer_consume_token (parser->lexer); ++decl_specs->specs[(int) ds_thread]; break; /* APPLE LOCAL begin CW asm blocks */ /* If we ever get here, we must be in CW asm mode. */ case RID_ASM: /* Consume the token. */ cp_lexer_consume_token (parser->lexer); ++decl_specs->specs[(int) ds_iasm_asm]; break; /* APPLE LOCAL end CW asm blocks */ default: /* We did not yet find a decl-specifier yet. */ found_decl_spec = false; break; } /* Constructors are a special case. The `S' in `S()' is not a decl-specifier; it is the beginning of the declarator. */ constructor_p = (!found_decl_spec && constructor_possible_p && (cp_parser_constructor_declarator_p (parser, decl_specs->specs[(int) ds_friend] != 0))); /* If we don't have a DECL_SPEC yet, then we must be looking at a type-specifier. */ if (!found_decl_spec && !constructor_p) { int decl_spec_declares_class_or_enum; bool is_cv_qualifier; tree type_spec; type_spec = cp_parser_type_specifier (parser, flags, decl_specs, /*is_declaration=*/true, &decl_spec_declares_class_or_enum, &is_cv_qualifier); *declares_class_or_enum |= decl_spec_declares_class_or_enum; /* If this type-specifier referenced a user-defined type (a typedef, class-name, etc.), then we can't allow any more such type-specifiers henceforth. [dcl.spec] The longest sequence of decl-specifiers that could possibly be a type name is taken as the decl-specifier-seq of a declaration. The sequence shall be self-consistent as described below. 
[dcl.type] As a general rule, at most one type-specifier is allowed in the complete decl-specifier-seq of a declaration. The only exceptions are the following: -- const or volatile can be combined with any other type-specifier. -- signed or unsigned can be combined with char, long, short, or int. -- .. Example: typedef char* Pc; void g (const int Pc); Here, Pc is *not* part of the decl-specifier seq; it's the declarator. Therefore, once we see a type-specifier (other than a cv-qualifier), we forbid any additional user-defined types. We *do* still allow things like `int int' to be considered a decl-specifier-seq, and issue the error message later. */ if (type_spec && !is_cv_qualifier) flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES; /* A constructor declarator cannot follow a type-specifier. */ if (type_spec) { constructor_possible_p = false; found_decl_spec = true; } } /* If we still do not have a DECL_SPEC, then there are no more decl-specifiers. */ if (!found_decl_spec) break; decl_specs->any_specifiers_p = true; /* After we see one decl-specifier, further decl-specifiers are always optional. */ flags |= CP_PARSER_FLAGS_OPTIONAL; } cp_parser_check_decl_spec (decl_specs); /* Don't allow a friend specifier with a class definition. */ if (decl_specs->specs[(int) ds_friend] != 0 && (*declares_class_or_enum & 2)) error ("class definition may not be declared a friend"); } /* Parse an (optional) storage-class-specifier. storage-class-specifier: auto register static extern mutable GNU Extension: storage-class-specifier: thread Returns an IDENTIFIER_NODE corresponding to the keyword used. */ static tree cp_parser_storage_class_specifier_opt (cp_parser* parser) { switch (cp_lexer_peek_token (parser->lexer)->keyword) { case RID_AUTO: case RID_REGISTER: case RID_STATIC: case RID_EXTERN: case RID_MUTABLE: case RID_THREAD: /* APPLE LOCAL begin CW asm blocks */ /* If we ever get here, we must be in CW asm mode. 
*/ case RID_ASM: /* APPLE LOCAL end CW asm blocks */ /* Consume the token. */ return cp_lexer_consume_token (parser->lexer)->u.value; default: return NULL_TREE; } } /* Parse an (optional) function-specifier. function-specifier: inline virtual explicit Returns an IDENTIFIER_NODE corresponding to the keyword used. Updates DECL_SPECS, if it is non-NULL. */ static tree cp_parser_function_specifier_opt (cp_parser* parser, cp_decl_specifier_seq *decl_specs) { switch (cp_lexer_peek_token (parser->lexer)->keyword) { case RID_INLINE: if (decl_specs) ++decl_specs->specs[(int) ds_inline]; break; case RID_VIRTUAL: /* 14.5.2.3 [temp.mem] A member function template shall not be virtual. */ if (PROCESSING_REAL_TEMPLATE_DECL_P ()) error ("templates may not be %<virtual%>"); else if (decl_specs) ++decl_specs->specs[(int) ds_virtual]; break; case RID_EXPLICIT: if (decl_specs) ++decl_specs->specs[(int) ds_explicit]; break; default: return NULL_TREE; } /* Consume the token. */ return cp_lexer_consume_token (parser->lexer)->u.value; } /* Parse a linkage-specification. linkage-specification: extern string-literal { declaration-seq [opt] } extern string-literal declaration */ static void cp_parser_linkage_specification (cp_parser* parser) { tree linkage; /* Look for the `extern' keyword. */ cp_parser_require_keyword (parser, RID_EXTERN, "`extern'"); /* Look for the string-literal. */ linkage = cp_parser_string_literal (parser, false, false); /* Transform the literal into an identifier. If the literal is a wide-character string, or contains embedded NULs, then we can't handle it as the user wants. */ if (strlen (TREE_STRING_POINTER (linkage)) != (size_t) (TREE_STRING_LENGTH (linkage) - 1)) { cp_parser_error (parser, "invalid linkage-specification"); /* Assume C++ linkage. */ linkage = lang_name_cplusplus; } else linkage = get_identifier (TREE_STRING_POINTER (linkage)); /* We're now using the new linkage. 
   */
  push_lang_context (linkage);
  /* If the next token is a `{', then we're using the first
     production.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Consume the `{' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the declarations.  */
      cp_parser_declaration_seq_opt (parser);
      /* Look for the closing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
    }
  /* Otherwise, there's just one declaration.  */
  else
    {
      bool saved_in_unbraced_linkage_specification_p;

      /* Save and set the flag so nested code knows it is inside an
	 unbraced `extern "C" <declaration>'; restored below to keep
	 the parser state balanced on every path.  */
      saved_in_unbraced_linkage_specification_p
	= parser->in_unbraced_linkage_specification_p;
      parser->in_unbraced_linkage_specification_p = true;
      cp_parser_declaration (parser);
      parser->in_unbraced_linkage_specification_p
	= saved_in_unbraced_linkage_specification_p;
    }

  /* We're done with the linkage-specification.  Must balance the
     push_lang_context above.  */
  pop_lang_context ();
}

/* Special member functions [gram.special] */

/* Parse a conversion-function-id.

   conversion-function-id:
     operator conversion-type-id

   Returns an IDENTIFIER_NODE representing the operator.  */

static tree
cp_parser_conversion_function_id (cp_parser* parser)
{
  tree type;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree pushed_scope = NULL_TREE;

  /* Look for the `operator' token.  */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, "`operator'"))
    return error_mark_node;
  /* When we parse the conversion-type-id, the current scope will be
     reset.  However, we need that information in able to look up the
     conversion function later, so we save it here.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We must enter the scope of the class so that the names of
     entities declared within the class are available in the
     conversion-type-id.  For example, consider:

       struct S {
	 typedef int I;
	 operator I();
       };

       S::operator I() { ... }

     In order to see that `I' is a type-name in the definition, we
     must be in the scope of `S'.
   */
  if (saved_scope)
    pushed_scope = push_scope (saved_scope);
  /* Parse the conversion-type-id.  */
  type = cp_parser_conversion_type_id (parser);
  /* Leave the scope of the class, if any.  */
  if (pushed_scope)
    pop_scope (pushed_scope);
  /* Restore the saved scope.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  /* If the TYPE is invalid, indicate failure.  */
  if (type == error_mark_node)
    return error_mark_node;
  return mangle_conv_op_name_for_type (type);
}

/* Parse a conversion-type-id:

   conversion-type-id:
     type-specifier-seq conversion-declarator [opt]

   Returns the TYPE specified.  */

static tree
cp_parser_conversion_type_id (cp_parser* parser)
{
  tree attributes;
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  tree type_specified;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);
  /* Parse the type-specifiers.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
				&type_specifiers);
  /* If that didn't work, stop.  */
  if (type_specifiers.type == error_mark_node)
    return error_mark_node;
  /* Parse the conversion-declarator.  */
  declarator = cp_parser_conversion_declarator_opt (parser);

  type_specified = grokdeclarator (declarator, &type_specifiers, TYPENAME,
				   /*initialized=*/0, &attributes);
  if (attributes)
    cplus_decl_attributes (&type_specified, attributes, /*flags=*/0);
  return type_specified;
}

/* Parse an (optional) conversion-declarator.

   conversion-declarator:
     ptr-operator conversion-declarator [opt]  */

static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree class_type;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Try the ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &class_type, &cv_quals);
  /* If it worked, look for more conversion-declarators.
   */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator (conversion-declarators
	 recurse: each ptr-operator wraps the declarators that
	 follow it).  */
      declarator = cp_parser_conversion_declarator_opt (parser);

      /* Create the representation of the declarator.  */
      if (class_type)
	declarator = make_ptrmem_declarator (cv_quals, class_type,
					     declarator);
      else if (code == INDIRECT_REF)
	declarator = make_pointer_declarator (cv_quals, declarator);
      else
	declarator = make_reference_declarator (cv_quals, declarator);

      return declarator;
    }

  return NULL;
}

/* Parse an (optional) ctor-initializer.

   ctor-initializer:
     : mem-initializer-list

   Returns TRUE iff the ctor-initializer was actually present.  */

static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  /* If the next token is not a `:', then there is no
     ctor-initializer.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
    {
      /* Do default initialization of any bases and members.  */
      if (DECL_CONSTRUCTOR_P (current_function_decl))
	finish_mem_initializers (NULL_TREE);

      return false;
    }

  /* Consume the `:' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* And the mem-initializer-list.  */
  cp_parser_mem_initializer_list (parser);

  return true;
}

/* Parse a mem-initializer-list.

   mem-initializer-list:
     mem-initializer
     mem-initializer , mem-initializer-list  */

static void
cp_parser_mem_initializer_list (cp_parser* parser)
{
  tree mem_initializer_list = NULL_TREE;

  /* Let the semantic analysis code know that we are starting the
     mem-initializer-list.  */
  if (!DECL_CONSTRUCTOR_P (current_function_decl))
    error ("only constructors take base initializers");

  /* Loop through the list.  */
  while (true)
    {
      tree mem_initializer;

      /* Parse the mem-initializer.  */
      mem_initializer = cp_parser_mem_initializer (parser);
      /* Add it to the list, unless it was erroneous.  Note that each
	 initializer is prepended via TREE_CHAIN, so the list handed
	 to finish_mem_initializers below is in reverse source
	 order.  */
      if (mem_initializer != error_mark_node)
	{
	  TREE_CHAIN (mem_initializer) = mem_initializer_list;
	  mem_initializer_list = mem_initializer;
	}
      /* If the next token is not a `,', we're done.
       */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* Perform semantic analysis.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (mem_initializer_list);
}

/* Parse a mem-initializer.

   mem-initializer:
     mem-initializer-id ( expression-list [opt] )

   GNU extension:

   mem-initializer:
     ( expression-list [opt] )

   Returns a TREE_LIST.  The TREE_PURPOSE is the TYPE (for a base
   class) or FIELD_DECL (for a non-static data member) to initialize;
   the TREE_VALUE is the expression-list.  An empty initialization
   list is represented by void_list_node.  */

static tree
cp_parser_mem_initializer (cp_parser* parser)
{
  tree mem_initializer_id;
  tree expression_list;
  tree member;

  /* Find out what is being initialized.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* A bare `( expression-list )' with no id is the GNU
	 anachronism for initializing the sole base class.  */
      pedwarn ("anachronistic old-style base class initializer");
      mem_initializer_id = NULL_TREE;
    }
  else
    mem_initializer_id = cp_parser_mem_initializer_id (parser);
  member = expand_member_init (mem_initializer_id);
  /* in_base_initializer is toggled around the expression-list parse;
     it is a global flag read elsewhere during semantic analysis.  */
  if (member && !DECL_P (member))
    in_base_initializer = 1;

  expression_list
    = cp_parser_parenthesized_expression_list (parser, false,
					       /*cast_p=*/false,
					       /*non_constant_p=*/NULL);
  if (expression_list == error_mark_node)
    return error_mark_node;
  if (!expression_list)
    expression_list = void_type_node;

  in_base_initializer = 0;

  return member ? build_tree_list (member, expression_list) : error_mark_node;
}

/* Parse a mem-initializer-id.

   mem-initializer-id:
     :: [opt] nested-name-specifier [opt] class-name
     identifier

   Returns a TYPE indicating the class to be initializer for the first
   production.  Returns an IDENTIFIER_NODE indicating the data member
   to be initialized for the second production.  */

static tree
cp_parser_mem_initializer_id (cp_parser* parser)
{
  bool global_scope_p;
  bool nested_name_specifier_p;
  bool template_p = false;
  tree id;

  /* `typename' is not allowed in this context ([temp.res]).
   */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      error ("keyword %<typename%> not allowed in this context (a qualified "
	     "member initializer is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to assume that we have seen the `typename' keyword at this
     point.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    /*is_declaration=*/true)
       != NULL_TREE);
  if (nested_name_specifier_p)
    template_p = cp_parser_optional_template_keyword (parser);
  /* If there is a `::' operator or a nested-name-specifier, then we
     are definitely looking for a class-name.  */
  if (global_scope_p || nested_name_specifier_p)
    return cp_parser_class_name (parser,
				 /*typename_keyword_p=*/true,
				 /*template_keyword_p=*/template_p,
				 none_type,
				 /*check_dependency_p=*/true,
				 /*class_head_p=*/false,
				 /*is_declaration=*/true);
  /* Otherwise, we could also be looking for an ordinary identifier.  */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name.  */
  id = cp_parser_class_name (parser,
			     /*typename_keyword_p=*/true,
			     /*template_keyword_p=*/false,
			     none_type,
			     /*check_dependency_p=*/true,
			     /*class_head_p=*/false,
			     /*is_declaration=*/true);
  /* If we found one, we're done.  */
  if (cp_parser_parse_definitely (parser))
    return id;
  /* Otherwise, look for an ordinary identifier.  */
  return cp_parser_identifier (parser);
}

/* Overloading [gram.over] */

/* Parse an operator-function-id.
   operator-function-id:
     operator operator

   Returns an IDENTIFIER_NODE for the operator which is a
   human-readable spelling of the identifier, e.g., `operator +'.  */

static tree
cp_parser_operator_function_id (cp_parser* parser)
{
  /* Look for the `operator' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, "`operator'"))
    return error_mark_node;
  /* And then the name of the operator itself.  */
  return cp_parser_operator (parser);
}

/* Parse an operator.

   operator:
     new delete
     new[] delete[]
     + - * / % ^ & | ~
     ! = < > += -= *= /= %=
     ^= &= |= << >> >>= <<= == !=
     <= >= && || ++ -- , ->* ->
     () []

   GNU Extensions:

   operator:
     <? >? <?= >?=

   Returns an IDENTIFIER_NODE for the operator which is a
   human-readable spelling of the identifier, e.g., `operator +'.  */

static tree
cp_parser_operator (cp_parser* parser)
{
  tree id = NULL_TREE;
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out which operator we have.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      {
	enum tree_code op;

	/* The keyword should be either `new' or `delete'.  */
	if (token->keyword == RID_NEW)
	  op = NEW_EXPR;
	else if (token->keyword == RID_DELETE)
	  op = DELETE_EXPR;
	else
	  break;

	/* Consume the `new' or `delete' token.  */
	cp_lexer_consume_token (parser->lexer);

	/* Peek at the next token.  */
	token = cp_lexer_peek_token (parser->lexer);
	/* If it's a `[' token then this is the array variant of the
	   operator.  */
	if (token->type == CPP_OPEN_SQUARE)
	  {
	    /* Consume the `[' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the `]' token.  */
	    cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
	    id = ansi_opname (op == NEW_EXPR
			      ? VEC_NEW_EXPR : VEC_DELETE_EXPR);
	  }
	/* Otherwise, we have the non-array variant.
	 */
	else
	  id = ansi_opname (op);

	/* new/delete return here directly; their tokens are already
	   consumed, unlike the single-token cases below.  */
	return id;
      }

    case CPP_PLUS:
      id = ansi_opname (PLUS_EXPR);
      break;

    case CPP_MINUS:
      id = ansi_opname (MINUS_EXPR);
      break;

    case CPP_MULT:
      id = ansi_opname (MULT_EXPR);
      break;

    case CPP_DIV:
      id = ansi_opname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD:
      id = ansi_opname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR:
      id = ansi_opname (BIT_XOR_EXPR);
      break;

    case CPP_AND:
      id = ansi_opname (BIT_AND_EXPR);
      break;

    case CPP_OR:
      id = ansi_opname (BIT_IOR_EXPR);
      break;

    case CPP_COMPL:
      id = ansi_opname (BIT_NOT_EXPR);
      break;

    case CPP_NOT:
      id = ansi_opname (TRUTH_NOT_EXPR);
      break;

    case CPP_EQ:
      id = ansi_assopname (NOP_EXPR);
      break;

    case CPP_LESS:
      id = ansi_opname (LT_EXPR);
      break;

    case CPP_GREATER:
      id = ansi_opname (GT_EXPR);
      break;

    case CPP_PLUS_EQ:
      id = ansi_assopname (PLUS_EXPR);
      break;

    case CPP_MINUS_EQ:
      id = ansi_assopname (MINUS_EXPR);
      break;

    case CPP_MULT_EQ:
      id = ansi_assopname (MULT_EXPR);
      break;

    case CPP_DIV_EQ:
      id = ansi_assopname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD_EQ:
      id = ansi_assopname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR_EQ:
      id = ansi_assopname (BIT_XOR_EXPR);
      break;

    case CPP_AND_EQ:
      id = ansi_assopname (BIT_AND_EXPR);
      break;

    case CPP_OR_EQ:
      id = ansi_assopname (BIT_IOR_EXPR);
      break;

    case CPP_LSHIFT:
      id = ansi_opname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT:
      id = ansi_opname (RSHIFT_EXPR);
      break;

    case CPP_LSHIFT_EQ:
      id = ansi_assopname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT_EQ:
      id = ansi_assopname (RSHIFT_EXPR);
      break;

    case CPP_EQ_EQ:
      id = ansi_opname (EQ_EXPR);
      break;

    case CPP_NOT_EQ:
      id = ansi_opname (NE_EXPR);
      break;

    case CPP_LESS_EQ:
      id = ansi_opname (LE_EXPR);
      break;

    case CPP_GREATER_EQ:
      id = ansi_opname (GE_EXPR);
      break;

    case CPP_AND_AND:
      id = ansi_opname (TRUTH_ANDIF_EXPR);
      break;

    case CPP_OR_OR:
      id = ansi_opname (TRUTH_ORIF_EXPR);
      break;

    /* NOTE: `++' maps to POSTINCREMENT_EXPR while `--' maps to
       PREDECREMENT_EXPR; ansi_opname presumably yields the same
       operator name for the pre/post variants -- TODO confirm.  */
    case CPP_PLUS_PLUS:
      id = ansi_opname (POSTINCREMENT_EXPR);
      break;

    case CPP_MINUS_MINUS:
      id = ansi_opname (PREDECREMENT_EXPR);
      break;

    case CPP_COMMA:
      id = ansi_opname (COMPOUND_EXPR);
      break;

    case CPP_DEREF_STAR:
      id = ansi_opname (MEMBER_REF);
      break;

    case CPP_DEREF:
      id = ansi_opname (COMPONENT_REF);
      break;

    case CPP_OPEN_PAREN:
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      return ansi_opname (CALL_EXPR);

    case CPP_OPEN_SQUARE:
      /* Consume the `['.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'");
      return ansi_opname (ARRAY_REF);

    default:
      /* Anything else is an error.  */
      break;
    }

  /* If we have selected an identifier, we need to consume the
     operator token.  */
  if (id)
    cp_lexer_consume_token (parser->lexer);
  /* Otherwise, no valid operator name was present.  */
  else
    {
      cp_parser_error (parser, "expected operator");
      id = error_mark_node;
    }

  return id;
}

/* Parse a template-declaration.

   template-declaration:
     export [opt] template < template-parameter-list > declaration

   If MEMBER_P is TRUE, this template-declaration occurs within a
   class-specifier.

   The grammar rule given by the standard isn't correct.  What
   is really meant is:

   template-declaration:
     export [opt] template-parameter-list-seq
       decl-specifier-seq [opt] init-declarator [opt] ;
     export [opt] template-parameter-list-seq
       function-definition

   template-parameter-list-seq:
     template-parameter-list-seq [opt]
     template < template-parameter-list >  */

static void
cp_parser_template_declaration (cp_parser* parser, bool member_p)
{
  /* Check for `export'.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT))
    {
      /* Consume the `export' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Warn that we do not support `export'.  */
      warning (0, "keyword %<export%> not implemented, and will be ignored");
    }

  cp_parser_template_declaration_after_export (parser, member_p);
}

/* Parse a template-parameter-list.

   template-parameter-list:
     template-parameter
     template-parameter-list , template-parameter

   Returns a TREE_LIST.
   Each node represents a template parameter.  The nodes are
   connected via their TREE_CHAINs.  */

static tree
cp_parser_template_parameter_list (cp_parser* parser)
{
  tree parameter_list = NULL_TREE;

  begin_template_parm_list ();
  while (true)
    {
      tree parameter;
      cp_token *token;
      bool is_non_type;

      /* Parse the template-parameter.  */
      parameter = cp_parser_template_parameter (parser, &is_non_type);
      /* Add it to the list.  */
      if (parameter != error_mark_node)
	parameter_list = process_template_parm (parameter_list,
						parameter,
						is_non_type);
      else
	{
	  /* Chain an error placeholder so the parameter count stays
	     consistent with what the user wrote.  */
	  tree err_parm = build_tree_list (parameter, parameter);
	  TREE_VALUE (err_parm) = error_mark_node;
	  parameter_list = chainon (parameter_list, err_parm);
	}

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not a `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;
      /* Otherwise, consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return end_template_parm_list (parameter_list);
}

/* Parse a template-parameter.

   template-parameter:
     type-parameter
     parameter-declaration

   If all goes well, returns a TREE_LIST.  The TREE_VALUE represents
   the parameter.  The TREE_PURPOSE is the default value, if any.
   Returns ERROR_MARK_NODE on failure.  *IS_NON_TYPE is set to true
   iff this parameter is a non-type parameter.  */

static tree
cp_parser_template_parameter (cp_parser* parser, bool *is_non_type)
{
  cp_token *token;
  cp_parameter_declarator *parameter_declarator;
  tree parm;

  /* Assume it is a type parameter or a template parameter.  */
  *is_non_type = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it is `class' or `template', we have a type-parameter.  */
  if (token->keyword == RID_TEMPLATE)
    return cp_parser_type_parameter (parser);
  /* If it is `class' or `typename' we do not know yet whether it is a
     type parameter or a non-type parameter.  Consider:

       template <typename T, typename T::X X> ...

     or:

       template <class C, class D*> ...

     Here, the first parameter is a type parameter, and the second is
     a non-type parameter.  We can tell by looking at the token after
     the identifier -- if it is a `,', `=', or `>' then we have a type
     parameter.  */
  if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS)
    {
      /* Peek at the token after `class' or `typename'.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If it's an identifier, skip it.  */
      if (token->type == CPP_NAME)
	token = cp_lexer_peek_nth_token (parser->lexer, 3);
      /* Now, see if the token looks like the end of a template
	 parameter.  */
      if (token->type == CPP_COMMA
	  || token->type == CPP_EQ
	  || token->type == CPP_GREATER)
	return cp_parser_type_parameter (parser);
    }

  /* Otherwise, it is a non-type parameter.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  *is_non_type = true;
  parameter_declarator
    = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true,
				       /*parenthesized_p=*/NULL);
  parm = grokdeclarator (parameter_declarator->declarator,
			 &parameter_declarator->decl_specifiers,
			 PARM, /*initialized=*/0,
			 /*attrlist=*/NULL);
  if (parm == error_mark_node)
    return error_mark_node;
  return build_tree_list (parameter_declarator->default_argument, parm);
}

/* Parse a type-parameter.

   type-parameter:
     class identifier [opt]
     class identifier [opt] = type-id
     typename identifier [opt]
     typename identifier [opt] = type-id
     template < template-parameter-list > class identifier [opt]
     template < template-parameter-list > class identifier [opt]
       = id-expression

   Returns a TREE_LIST.  The TREE_VALUE is itself a TREE_LIST.  The
   TREE_PURPOSE is the default-argument, if any.  The TREE_VALUE is
   the declaration of the parameter.  */

static tree
cp_parser_type_parameter (cp_parser* parser)
{
  cp_token *token;
  tree parameter;

  /* Look for a keyword to tell us what kind of parameter this is.
   */
  token = cp_parser_require (parser, CPP_KEYWORD,
			     "`class', `typename', or `template'");
  if (!token)
    return error_mark_node;

  switch (token->keyword)
    {
    case RID_CLASS:
    case RID_TYPENAME:
      {
	tree identifier;
	tree default_argument;

	/* If the next token is an identifier, then it names the
	   parameter.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	  identifier = cp_parser_identifier (parser);
	else
	  identifier = NULL_TREE;

	/* Create the parameter.  */
	parameter = finish_template_type_parm (class_type_node, identifier);

	/* If the next token is an `=', we have a default argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    /* Consume the `=' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the default-argument.  Access checks are not
	       deferred while parsing it.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument = cp_parser_type_id (parser);
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    case RID_TEMPLATE:
      {
	tree parameter_list;
	tree identifier;
	tree default_argument;

	/* Look for the `<'.  */
	cp_parser_require (parser, CPP_LESS, "`<'");
	/* Parse the template-parameter-list.  */
	parameter_list = cp_parser_template_parameter_list (parser);
	/* Look for the `>'.  */
	cp_parser_require (parser, CPP_GREATER, "`>'");
	/* Look for the `class' keyword.  */
	cp_parser_require_keyword (parser, RID_CLASS, "`class'");
	/* If the next token is an `=', then there is a
	   default-argument.  If the next token is a `>', we are at
	   the end of the parameter-list.  If the next token is a `,',
	   then we are at the end of this parameter.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  {
	    identifier = cp_parser_identifier (parser);
	    /* Treat invalid names as if the parameter were nameless.
	     */
	    if (identifier == error_mark_node)
	      identifier = NULL_TREE;
	  }
	else
	  identifier = NULL_TREE;

	/* Create the template parameter.  */
	parameter = finish_template_template_parm (class_type_node,
						   identifier);

	/* If the next token is an `=', then there is a
	   default-argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    bool is_template;

	    /* Consume the `='.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the id-expression.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument
	      = cp_parser_id_expression (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*template_p=*/&is_template,
					 /*declarator_p=*/false,
					 /*optional_p=*/false);
	    if (TREE_CODE (default_argument) == TYPE_DECL)
	      /* If the id-expression was a template-id that refers to
		 a template-class, we already have the declaration here,
		 so no further lookup is needed.  */
	      ;
	    else
	      /* Look up the name.  */
	      default_argument
		= cp_parser_lookup_name (parser, default_argument,
					 none_type,
					 /*is_template=*/is_template,
					 /*is_namespace=*/false,
					 /*check_dependency=*/true,
					 /*ambiguous_decls=*/NULL);
	    /* See if the default argument is valid.  */
	    default_argument
	      = check_template_template_default_arg (default_argument);
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    default:
      /* cp_parser_require above guarantees one of the three keywords,
	 so this is unreachable.  */
      gcc_unreachable ();
      break;
    }

  return parameter;
}

/* Parse a template-id.

   template-id:
     template-name < template-argument-list [opt] >

   If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the
   `template' keyword.  In this case, a TEMPLATE_ID_EXPR will be
   returned.  Otherwise, if the template-name names a function, or set
   of functions, returns a TEMPLATE_ID_EXPR.  If the template-name
   names a class, returns a TYPE_DECL for the specialization.

   If CHECK_DEPENDENCY_P is FALSE, names are looked up in
   uninstantiated templates.
   */
static tree
cp_parser_template_id (cp_parser *parser,
		       bool template_keyword_p,
		       bool check_dependency_p,
		       bool is_declaration)
{
  int i;
  tree template;
  tree arguments;
  tree template_id;
  cp_token_position start_of_id = 0;
  deferred_access_check *chk;
  VEC (deferred_access_check,gc) *access_check;
  cp_token *next_token, *next_token_2;
  bool is_identifier;

  /* If the next token corresponds to a template-id, there is no need
     to reparse it.  (A previous tentative parse may have replaced the
     token stream with a single CPP_TEMPLATE_ID token; see the tail of
     this function.)  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type == CPP_TEMPLATE_ID)
    {
      struct tree_check *check_value;

      /* Get the stored value.  */
      check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
      /* Perform any access checks that were deferred.  */
      access_check = check_value->checks;
      if (access_check)
	{
	  for (i = 0 ;
	       VEC_iterate (deferred_access_check,
			    access_check,
			    i,
			    chk) ;
	       ++i)
	    {
	      perform_or_defer_access_check (chk->binfo,
					     chk->decl,
					     chk->diag_decl);
	    }
	}
      /* Return the stored value.  */
      return check_value->value;
    }

  /* Avoid performing name lookup if there is no possibility of
     finding a template-id.  */
  if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR)
      || (next_token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2)))
    {
      cp_parser_error (parser, "expected template-id");
      return error_mark_node;
    }

  /* Remember where the template-id starts.  START_OF_ID stays zero
     unless we are in an uncommitted tentative parse.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    start_of_id = cp_lexer_token_position (parser->lexer, false);

  push_deferring_access_checks (dk_deferred);

  /* Parse the template-name.  */
  is_identifier = false;
  template = cp_parser_template_name (parser, template_keyword_p,
				      check_dependency_p,
				      is_declaration,
				      &is_identifier);
  if (template == error_mark_node || is_identifier)
    {
      pop_deferring_access_checks ();
      return template;
    }

  /* If we find the sequence `[:' after a template-name, it's probably
     a digraph-typo for `< ::'.  Substitute the tokens and check if we can
     parse correctly the argument list.
   */
  next_token = cp_lexer_peek_token (parser->lexer);
  next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2);
  if (next_token->type == CPP_OPEN_SQUARE
      && next_token->flags & DIGRAPH
      && next_token_2->type == CPP_COLON
      && !(next_token_2->flags & PREV_WHITE))
    {
      cp_parser_parse_tentatively (parser);
      /* Change `:' into `::'.  */
      next_token_2->type = CPP_SCOPE;
      /* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is
	 CPP_LESS.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
      if (!cp_parser_parse_definitely (parser))
	{
	  /* If we couldn't parse an argument list, then we revert our changes
	     and return simply an error. Maybe this is not a template-id
	     after all.  */
	  next_token_2->type = CPP_COLON;
	  cp_parser_error (parser, "expected %<<%>");
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Otherwise, emit an error about the invalid digraph, but continue
	 parsing because we got our argument list.  */
      pedwarn ("%<<::%> cannot begin a template-argument list");
      inform ("%<<:%> is an alternate spelling for %<[%>. Insert whitespace "
	      "between %<<%> and %<::%>");
      if (!flag_permissive)
	{
	  /* Emit the -fpermissive hint at most once per compilation.  */
	  static bool hint;
	  if (!hint)
	    {
	      inform ("(if you use -fpermissive G++ will accept your code)");
	      hint = true;
	    }
	}
    }
  else
    {
      /* Look for the `<' that starts the template-argument-list.  */
      if (!cp_parser_require (parser, CPP_LESS, "`<'"))
	{
	  pop_deferring_access_checks ();
	  return error_mark_node;
	}
      /* Parse the arguments.  */
      arguments = cp_parser_enclosed_template_argument_list (parser);
    }

  /* Build a representation of the specialization.  */
  if (TREE_CODE (template) == IDENTIFIER_NODE)
    template_id = build_min_nt (TEMPLATE_ID_EXPR, template, arguments);
  else if (DECL_CLASS_TEMPLATE_P (template)
	   || DECL_TEMPLATE_TEMPLATE_PARM_P (template))
    {
      bool entering_scope;
      /* In "template <typename T> ...
	 A<T>::", A<T> is the abstract A template (rather than some
	 instantiation thereof) only if is not nested within some other
	 construct.  For example, in "template <typename T> void f(T)
	 { A<T>::", A<T> is just an instantiation of A.  */
      entering_scope = (template_parm_scope_p ()
			&& cp_lexer_next_token_is (parser->lexer,
						   CPP_SCOPE));
      template_id
	= finish_template_type (template, arguments, entering_scope);
    }
  else
    {
      /* If it's not a class-template or a template-template, it should be
	 a function-template.  */
      gcc_assert ((DECL_FUNCTION_TEMPLATE_P (template)
		   || TREE_CODE (template) == OVERLOAD
		   || BASELINK_P (template)));

      template_id = lookup_template_function (template, arguments);
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the template-id with a CPP_TEMPLATE_ID token.  That way,
     should we re-parse the token stream, we will not have to repeat
     the effort required to do the parse, nor will we issue duplicate
     error messages about problems during instantiation of the
     template.  */
  if (start_of_id)
    {
      cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id);

      /* Reset the contents of the START_OF_ID token.  */
      token->type = CPP_TEMPLATE_ID;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = GGC_CNEW (struct tree_check);
      token->u.tree_check_value->value = template_id;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start_of_id);

      /* ??? Can we actually assume that, if template_id ==
	 error_mark_node, we will have issued a diagnostic to the
	 user, as opposed to simply marking the tentative parse as
	 failed?  */
      if (cp_parser_error_occurred (parser)
	  && template_id != error_mark_node)
	error ("parse error in template argument list");
    }

  pop_deferring_access_checks ();
  return template_id;
}

/* Parse a template-name.
   template-name:
     identifier

   The standard should actually say:

   template-name:
     identifier
     operator-function-id

   A defect report has been filed about this issue.

   A conversion-function-id cannot be a template name because they cannot
   be part of a template-id.  In fact, looking at this code:

   a.operator K<int>()

   the conversion-function-id is "operator K<int>", and K<int> is a type-id.
   It is impossible to call a templated conversion-function-id with an
   explicit argument list, since the only allowed template parameter is
   the type to which it is converting.

   If TEMPLATE_KEYWORD_P is true, then we have just seen the
   `template' keyword, in a construction like:

     T::template f<3>()

   In that case `f' is taken to be a template-name, even though there
   is no way of knowing for sure.

   Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the
   name refers to a set of overloaded functions, at least one of which
   is a template, or an IDENTIFIER_NODE with the name of the template,
   if TEMPLATE_KEYWORD_P is true.  If CHECK_DEPENDENCY_P is FALSE,
   names are looked up inside uninstantiated templates.  */

static tree
cp_parser_template_name (cp_parser* parser,
                         bool template_keyword_p,
                         bool check_dependency_p,
                         bool is_declaration,
                         bool *is_identifier)
{
  tree identifier;
  tree decl;
  tree fns;

  /* If the next token is `operator', then we have either an
     operator-function-id or a conversion-function-id.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR))
    {
      /* We don't know whether we're looking at an
         operator-function-id or a conversion-function-id.  */
      cp_parser_parse_tentatively (parser);
      /* Try an operator-function-id.  */
      identifier = cp_parser_operator_function_id (parser);
      /* If that didn't work, try a conversion-function-id.  */
      if (!cp_parser_parse_definitely (parser))
        {
          cp_parser_error (parser, "expected template-name");
          return error_mark_node;
        }
    }
  /* Look for the identifier.  */
  else
    identifier = cp_parser_identifier (parser);

  /* If we didn't find an identifier, we don't have a template-id.  */
  if (identifier == error_mark_node)
    return error_mark_node;

  /* If the name immediately followed the `template' keyword, then it
     is a template-name.  However, if the next token is not `<', then
     we do not treat it as a template-name, since it is not being used
     as part of a template-id.  This enables us to handle constructs
     like:

       template <typename T> struct S { S(); };
       template <typename T> S<T>::S();

     correctly.  We would treat `S' as a template -- if it were `S<T>'
     -- but we do not if there is no `<'.  */
  if (processing_template_decl
      && cp_parser_nth_token_starts_template_argument_list_p (parser, 1))
    {
      /* In a declaration, in a dependent context, we pretend that the
         "template" keyword was present in order to improve error
         recovery.  For example, given:

           template <typename T> void f(T::X<int>);

         we want to treat "X<int>" as a template-id.  */
      if (is_declaration
          && !template_keyword_p
          && parser->scope && TYPE_P (parser->scope)
          && check_dependency_p
          && dependent_type_p (parser->scope)
          /* Do not do this for dtors (or ctors), since they never
             need the template keyword before their name.  */
          && !constructor_name_p (identifier, parser->scope))
        {
          cp_token_position start = 0;

          /* Explain what went wrong.  */
          error ("non-template %qD used as template", identifier);
          inform ("use %<%T::template %D%> to indicate that it is a template",
                  parser->scope, identifier);
          /* If parsing tentatively, find the location of the "<" token.  */
          if (cp_parser_simulate_error (parser))
            start = cp_lexer_token_position (parser->lexer, true);
          /* Parse the template arguments so that we can issue error
             messages about them.  */
          cp_lexer_consume_token (parser->lexer);
          cp_parser_enclosed_template_argument_list (parser);
          /* Skip tokens until we find a good place from which to
             continue parsing.  */
          cp_parser_skip_to_closing_parenthesis (parser,
                                                 /*recovering=*/true,
                                                 /*or_comma=*/true,
                                                 /*consume_paren=*/false);
          /* If parsing tentatively, permanently remove the template
             argument list.  That will prevent duplicate error
             messages from being issued about the missing "template"
             keyword.  */
          if (start)
            cp_lexer_purge_tokens_after (parser->lexer, start);
          if (is_identifier)
            *is_identifier = true;
          return identifier;
        }

      /* If the "template" keyword is present, then there is generally
         no point in doing name-lookup, so we just return IDENTIFIER.
         But, if the qualifying scope is non-dependent then we can
         (and must) do name-lookup normally.  */
      if (template_keyword_p
          && (!parser->scope
              || (TYPE_P (parser->scope)
                  && dependent_type_p (parser->scope))))
        return identifier;
    }

  /* Look up the name.  */
  decl = cp_parser_lookup_name (parser, identifier,
                                none_type,
                                /*is_template=*/false,
                                /*is_namespace=*/false,
                                check_dependency_p,
                                /*ambiguous_decls=*/NULL);
  decl = maybe_get_template_decl_from_type_decl (decl);

  /* If DECL is a template, then the name was a template-name.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    ;
  else
    {
      tree fn = NULL_TREE;

      /* The standard does not explicitly indicate whether a name that
         names a set of overloaded declarations, some of which are
         templates, is a template-name.  However, such a name should
         be a template-name; otherwise, there is no way to form a
         template-id for the overloaded templates.  */
      fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl;
      if (TREE_CODE (fns) == OVERLOAD)
        for (fn = fns; fn; fn = OVL_NEXT (fn))
          if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL)
            break;

      if (!fn)
        {
          /* The name does not name a template.  */
          cp_parser_error (parser, "expected template-name");
          return error_mark_node;
        }
    }

  /* If DECL is dependent, and refers to a function, then just return
     its name; we will look it up again during template instantiation.  */
  if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl))
    {
      tree scope = CP_DECL_CONTEXT (get_first_fn (decl));
      if (TYPE_P (scope) && dependent_type_p (scope))
        return identifier;
    }

  return decl;
}

/* Parse a template-argument-list.

   template-argument-list:
     template-argument
     template-argument-list , template-argument

   Returns a TREE_VEC containing the arguments.  */

static tree
cp_parser_template_argument_list (cp_parser* parser)
{
  /* Arguments are accumulated in a small on-stack buffer; if more
     than ten are seen the buffer is moved to (and doubled on) the
     heap, and freed before returning.  */
  tree fixed_args[10];
  unsigned n_args = 0;
  unsigned alloced = 10;
  tree *arg_ary = fixed_args;
  tree vec;
  bool saved_in_template_argument_list_p;
  bool saved_ice_p;
  bool saved_non_ice_p;

  saved_in_template_argument_list_p = parser->in_template_argument_list_p;
  parser->in_template_argument_list_p = true;
  /* Even if the template-id appears in an integral
     constant-expression, the contents of the argument list do
     not.  */
  saved_ice_p = parser->integral_constant_expression_p;
  parser->integral_constant_expression_p = false;
  saved_non_ice_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p = false;
  /* Parse the arguments.  */
  do
    {
      tree argument;

      if (n_args)
        /* Consume the comma.  */
        cp_lexer_consume_token (parser->lexer);

      /* Parse the template-argument.  */
      argument = cp_parser_template_argument (parser);
      if (n_args == alloced)
        {
          alloced *= 2;

          if (arg_ary == fixed_args)
            {
              arg_ary = XNEWVEC (tree, alloced);
              memcpy (arg_ary, fixed_args, sizeof (tree) * n_args);
            }
          else
            arg_ary = XRESIZEVEC (tree, arg_ary, alloced);
        }
      arg_ary[n_args++] = argument;
    }
  while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA));

  vec = make_tree_vec (n_args);

  while (n_args--)
    TREE_VEC_ELT (vec, n_args) = arg_ary[n_args];

  if (arg_ary != fixed_args)
    free (arg_ary);
  /* Restore the saved parser state.  */
  parser->non_integral_constant_expression_p = saved_non_ice_p;
  parser->integral_constant_expression_p = saved_ice_p;
  parser->in_template_argument_list_p = saved_in_template_argument_list_p;
  return vec;
}

/* Parse a template-argument.
   template-argument:
     assignment-expression
     type-id
     id-expression

   The representation is that of an assignment-expression, type-id, or
   id-expression -- except that the qualified id-expression is
   evaluated, so that the value returned is either a DECL or an
   OVERLOAD.

   Although the standard says "assignment-expression", it forbids
   throw-expressions or assignments in the template argument.
   Therefore, we use "conditional-expression" instead.  */

static tree
cp_parser_template_argument (cp_parser* parser)
{
  tree argument;
  bool template_p;
  bool address_p;
  bool maybe_type_id = false;
  cp_token *token;
  cp_id_kind idk;

  /* There's really no way to know what we're looking at, so we just
     try each alternative in order.

       [temp.arg]

       In a template-argument, an ambiguity between a type-id and an
       expression is resolved to a type-id, regardless of the form of
       the corresponding template-parameter.

     Therefore, we try a type-id first.  */
  cp_parser_parse_tentatively (parser);
  argument = cp_parser_type_id (parser);
  /* If there was no error parsing the type-id but the next token is a
     '>>', we probably found a typo for '> >'.  But there are type-id
     which are also valid expressions.  For instance:

     struct X { int operator >> (int); };
     template <int V> struct Foo {};
     Foo<X () >> 5> r;

     Here 'X()' is a valid type-id of a function type, but the user just
     wanted to write the expression "X() >> 5".  Thus, we remember that
     we found a valid type-id, but we still try to parse the argument as
     an expression to see what happens.  */
  if (!cp_parser_error_occurred (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      maybe_type_id = true;
      cp_parser_abort_tentative_parse (parser);
    }
  else
    {
      /* If the next token isn't a `,' or a `>', then this argument
         wasn't really finished.  This means that the argument is not a
         valid type-id.  */
      if (!cp_parser_next_token_ends_template_argument_p (parser))
        cp_parser_error (parser, "expected template-argument");
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
        return argument;
    }
  /* We're still not sure what the argument will be.  */
  cp_parser_parse_tentatively (parser);
  /* Try a template.  */
  argument = cp_parser_id_expression (parser,
                                      /*template_keyword_p=*/false,
                                      /*check_dependency_p=*/true,
                                      &template_p,
                                      /*declarator_p=*/false,
                                      /*optional_p=*/false);
  /* If the next token isn't a `,' or a `>', then this argument wasn't
     really finished.  */
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (!cp_parser_error_occurred (parser))
    {
      /* Figure out what is being referred to.  If the id-expression
         was for a class template specialization, then we will have a
         TYPE_DECL at this point.  There is no need to do name lookup
         at this point in that case.  */
      if (TREE_CODE (argument) != TYPE_DECL)
        argument = cp_parser_lookup_name (parser, argument,
                                          none_type,
                                          /*is_template=*/template_p,
                                          /*is_namespace=*/false,
                                          /*check_dependency=*/true,
                                          /*ambiguous_decls=*/NULL);
      if (TREE_CODE (argument) != TEMPLATE_DECL
          && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
        cp_parser_error (parser, "expected template-name");
    }
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* It must be a non-type argument.  The permitted cases are given
     in [temp.arg.nontype]:

     -- an integral constant-expression of integral or enumeration
        type; or

     -- the name of a non-type template-parameter; or

     -- the name of an object or function with external linkage...

     -- the address of an object or function with external linkage...

     -- a pointer to member...  */
  /* Look for a non-type template parameter.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
                                               /*address_p=*/false,
                                               /*cast_p=*/false,
                                               /*template_arg_p=*/true,
                                               &idk);
      if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX
          || !cp_parser_next_token_ends_template_argument_p (parser))
        cp_parser_simulate_error (parser);
      if (cp_parser_parse_definitely (parser))
        return argument;
    }
  /* If the next token is "&", the argument must be the address of an
     object or function with external linkage.  */
  address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
  if (address_p)
    cp_lexer_consume_token (parser->lexer);
  /* See if we might have an id-expression.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME
      || token->keyword == RID_OPERATOR
      || token->type == CPP_SCOPE
      || token->type == CPP_TEMPLATE_ID
      || token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
                                               address_p,
                                               /*cast_p=*/false,
                                               /*template_arg_p=*/true,
                                               &idk);
      if (cp_parser_error_occurred (parser)
          || !cp_parser_next_token_ends_template_argument_p (parser))
        cp_parser_abort_tentative_parse (parser);
      else
        {
          if (TREE_CODE (argument) == INDIRECT_REF)
            {
              gcc_assert (REFERENCE_REF_P (argument));
              argument = TREE_OPERAND (argument, 0);
            }

          if (TREE_CODE (argument) == VAR_DECL)
            {
              /* A variable without external linkage might still be a
                 valid constant-expression, so no error is issued here
                 if the external-linkage check fails.  */
              if (!address_p && !DECL_EXTERNAL_LINKAGE_P (argument))
                cp_parser_simulate_error (parser);
            }
          else if (is_overloaded_fn (argument))
            /* All overloaded functions are allowed; if the external
               linkage test does not pass, an error will be issued
               later.  */
            ;
          else if (address_p
                   && (TREE_CODE (argument) == OFFSET_REF
                       || TREE_CODE (argument) == SCOPE_REF))
            /* A pointer-to-member.  */
            ;
          else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX)
            ;
          else
            cp_parser_simulate_error (parser);

          if (cp_parser_parse_definitely (parser))
            {
              if (address_p)
                argument = build_x_unary_op (ADDR_EXPR, argument);
              return argument;
            }
        }
    }
  /* If the argument started with "&", there are no other valid
     alternatives at this point.  */
  if (address_p)
    {
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }

  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser,
                                            /*allow_non_constant_p=*/false,
                                            /*non_constant_p=*/NULL);
  argument = fold_non_dependent_expr (argument);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it).  We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_type_id (parser);
}

/* Parse an explicit-instantiation.

   explicit-instantiation:
     template declaration

   Although the standard says `declaration', what it really means is:

   explicit-instantiation:
     template decl-specifier-seq [opt] declarator [opt] ;

   Things like `template int S<int>::i = 5, int S<double>::j;' are not
   supposed to be allowed.  A defect report has been filed about this
   issue.
   GNU Extension:

   explicit-instantiation:
     storage-class-specifier template
       decl-specifier-seq [opt] declarator [opt] ;
     function-specifier template
       decl-specifier-seq [opt] declarator [opt] ;  */

static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  tree extension_specifier = NULL_TREE;

  /* Look for an (optional) storage-class-specifier or
     function-specifier.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      extension_specifier
        = cp_parser_storage_class_specifier_opt (parser);
      if (!extension_specifier)
        extension_specifier
          = cp_parser_function_specifier_opt (parser,
                                              /*decl_specs=*/NULL);
    }

  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, "`template'");
  /* Let the front end know that we are processing an explicit
     instantiation.  */
  begin_explicit_instantiation ();
  /* [temp.explicit] says that we are supposed to ignore access
     control while processing explicit instantiation directives.  */
  push_deferring_access_checks (dk_no_check);
  /* Parse a decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_OPTIONAL,
                                &decl_specifiers,
                                &declares_class_or_enum);
  /* If there was exactly one decl-specifier, and it declared a class,
     and there's no declarator, then we have an explicit type
     instantiation.  */
  if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
    {
      tree type;

      type = check_tag_decl (&decl_specifiers);
      /* Turn access control back on for names used during
         template instantiation.  */
      pop_deferring_access_checks ();
      if (type)
        do_type_instantiation (type, extension_specifier,
                               /*complain=*/tf_error);
    }
  else
    {
      cp_declarator *declarator;
      tree decl;

      /* Parse the declarator.  */
      declarator
        = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                /*ctor_dtor_or_conv_p=*/NULL,
                                /*parenthesized_p=*/NULL,
                                /*member_p=*/false);
      if (declares_class_or_enum & 2)
        cp_parser_check_for_definition_in_return_type (declarator,
                                                       decl_specifiers.type);
      if (declarator != cp_error_declarator)
        {
          decl = grokdeclarator (declarator, &decl_specifiers,
                                 NORMAL, 0, &decl_specifiers.attributes);
          /* Turn access control back on for names used during
             template instantiation.  */
          pop_deferring_access_checks ();
          /* Do the explicit instantiation.  */
          do_decl_instantiation (decl, extension_specifier);
        }
      else
        {
          pop_deferring_access_checks ();
          /* Skip the body of the explicit instantiation.  */
          cp_parser_skip_to_end_of_statement (parser);
        }
    }
  /* We're done with the instantiation.  */
  end_explicit_instantiation ();

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse an explicit-specialization.

   explicit-specialization:
     template < > declaration

   Although the standard says `declaration', what it really means is:

   explicit-specialization:
     template <> decl-specifier [opt] init-declarator [opt] ;
     template <> function-definition
     template <> explicit-specialization
     template <> template-declaration  */

static void
cp_parser_explicit_specialization (cp_parser* parser)
{
  bool need_lang_pop;
  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, "`template'");
  /* Look for the `<'.  */
  cp_parser_require (parser, CPP_LESS, "`<'");
  /* Look for the `>'.  */
  cp_parser_require (parser, CPP_GREATER, "`>'");
  /* We have processed another parameter list.  */
  ++parser->num_template_parameter_lists;
  /* [temp]

     A template ... explicit specialization ... shall not have C
     linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error ("template specialization with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
         front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;
  /* Let the front end know that we are beginning a specialization.  */
  if (!begin_specialization ())
    {
      end_specialization ();
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }

  /* If the next keyword is `template', we need to figure out whether
     or not we're looking at a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
          && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
        cp_parser_template_declaration_after_export (parser,
                                                     /*member_p=*/false);
      else
        cp_parser_explicit_specialization (parser);
    }
  else
    /* Parse the dependent declaration.  */
    cp_parser_single_declaration (parser,
                                  /*checks=*/NULL,
                                  /*member_p=*/false,
                                  /*friend_p=*/NULL);
  /* We're done with the specialization.  */
  end_specialization ();
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* We're done with this parameter list.  */
  --parser->num_template_parameter_lists;
}

/* Parse a type-specifier.

   type-specifier:
     simple-type-specifier
     class-specifier
     enum-specifier
     elaborated-type-specifier
     cv-qualifier

   GNU Extension:

   type-specifier:
     __complex__

   Returns a representation of the type-specifier.  For a
   class-specifier, enum-specifier, or elaborated-type-specifier, a
   TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.

   The parser flags FLAGS is used to control type-specifier parsing.

   If IS_DECLARATION is TRUE, then this type-specifier is appearing
   in a decl-specifier-seq.

   If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
   class-specifier, enum-specifier, or elaborated-type-specifier, then
   *DECLARES_CLASS_OR_ENUM is set to a nonzero value.  The value is 1
   if a type is declared; 2 if it is defined.  Otherwise, it is set to
   zero.
If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a cv-qualifier, then IS_CV_QUALIFIER is set to TRUE. Otherwise, it is set to FALSE. */ static tree cp_parser_type_specifier (cp_parser* parser, cp_parser_flags flags, cp_decl_specifier_seq *decl_specs, bool is_declaration, int* declares_class_or_enum, bool* is_cv_qualifier) { tree type_spec = NULL_TREE; cp_token *token; enum rid keyword; cp_decl_spec ds = ds_last; /* Assume this type-specifier does not declare a new type. */ if (declares_class_or_enum) *declares_class_or_enum = 0; /* And that it does not specify a cv-qualifier. */ if (is_cv_qualifier) *is_cv_qualifier = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If we're looking at a keyword, we can use that to guide the production we choose. */ keyword = token->keyword; switch (keyword) { case RID_ENUM: /* Look for the enum-specifier. */ type_spec = cp_parser_enum_specifier (parser); /* If that worked, we're done. */ if (type_spec) { if (declares_class_or_enum) *declares_class_or_enum = 2; if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type_spec, /*user_defined_p=*/true); return type_spec; } else goto elaborated_type_specifier; /* Any of these indicate either a class-specifier, or an elaborated-type-specifier. */ case RID_CLASS: case RID_STRUCT: case RID_UNION: /* Parse tentatively so that we can back up if we don't find a class-specifier. */ cp_parser_parse_tentatively (parser); /* Look for the class-specifier. */ type_spec = cp_parser_class_specifier (parser); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) { if (declares_class_or_enum) *declares_class_or_enum = 2; if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type_spec, /*user_defined_p=*/true); return type_spec; } /* Fall through. */ elaborated_type_specifier: /* We're declaring (not defining) a class or enum. */ if (declares_class_or_enum) *declares_class_or_enum = 1; /* Fall through. 
*/ case RID_TYPENAME: /* Look for an elaborated-type-specifier. */ type_spec = (cp_parser_elaborated_type_specifier (parser, decl_specs && decl_specs->specs[(int) ds_friend], is_declaration)); if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type_spec, /*user_defined_p=*/true); return type_spec; case RID_CONST: ds = ds_const; if (is_cv_qualifier) *is_cv_qualifier = true; break; case RID_VOLATILE: ds = ds_volatile; if (is_cv_qualifier) *is_cv_qualifier = true; break; case RID_RESTRICT: ds = ds_restrict; if (is_cv_qualifier) *is_cv_qualifier = true; break; case RID_COMPLEX: /* The `__complex__' keyword is a GNU extension. */ ds = ds_complex; break; default: break; } /* Handle simple keywords. */ if (ds != ds_last) { if (decl_specs) { ++decl_specs->specs[(int)ds]; decl_specs->any_specifiers_p = true; } return cp_lexer_consume_token (parser->lexer)->u.value; } /* If we do not already have a type-specifier, assume we are looking at a simple-type-specifier. */ type_spec = cp_parser_simple_type_specifier (parser, decl_specs, flags); /* If we didn't find a type-specifier, and a type-specifier was not optional in this context, issue an error message. */ if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL)) { cp_parser_error (parser, "expected type specifier"); return error_mark_node; } return type_spec; } /* Parse a simple-type-specifier. simple-type-specifier: :: [opt] nested-name-specifier [opt] type-name :: [opt] nested-name-specifier template template-id char wchar_t bool short int long signed unsigned float double void GNU Extension: simple-type-specifier: __typeof__ unary-expression __typeof__ ( type-id ) Returns the indicated TYPE_DECL. If DECL_SPECS is not NULL, it is appropriately updated. */ static tree cp_parser_simple_type_specifier (cp_parser* parser, cp_decl_specifier_seq *decl_specs, cp_parser_flags flags) { tree type = NULL_TREE; cp_token *token; /* Peek at the next token. 
*/ token = cp_lexer_peek_token (parser->lexer); /* If we're looking at a keyword, things are easy. */ switch (token->keyword) { case RID_CHAR: if (decl_specs) decl_specs->explicit_char_p = true; type = char_type_node; break; case RID_WCHAR: type = wchar_type_node; break; case RID_BOOL: type = boolean_type_node; break; case RID_SHORT: if (decl_specs) ++decl_specs->specs[(int) ds_short]; type = short_integer_type_node; break; case RID_INT: if (decl_specs) decl_specs->explicit_int_p = true; type = integer_type_node; break; case RID_LONG: if (decl_specs) ++decl_specs->specs[(int) ds_long]; type = long_integer_type_node; break; case RID_SIGNED: if (decl_specs) ++decl_specs->specs[(int) ds_signed]; type = integer_type_node; break; case RID_UNSIGNED: if (decl_specs) ++decl_specs->specs[(int) ds_unsigned]; type = unsigned_type_node; break; case RID_FLOAT: type = float_type_node; break; case RID_DOUBLE: type = double_type_node; break; case RID_VOID: type = void_type_node; break; case RID_TYPEOF: /* Consume the `typeof' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the operand to `typeof'. */ type = cp_parser_sizeof_operand (parser, RID_TYPEOF); /* If it is not already a TYPE, take its type. */ if (!TYPE_P (type)) type = finish_typeof (type); if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type, /*user_defined_p=*/true); return type; default: break; } /* If the type-specifier was for a built-in type, we're done. */ if (type) { tree id; /* Record the type. */ if (decl_specs && (token->keyword != RID_SIGNED && token->keyword != RID_UNSIGNED && token->keyword != RID_SHORT && token->keyword != RID_LONG)) cp_parser_set_decl_spec_type (decl_specs, type, /*user_defined=*/false); if (decl_specs) decl_specs->any_specifiers_p = true; /* Consume the token. */ id = cp_lexer_consume_token (parser->lexer)->u.value; /* There is no valid C++ program where a non-template type is followed by a "<". 
That usually indicates that the user thought that the type was a template. */ cp_parser_check_for_invalid_template_id (parser, type); return TYPE_NAME (type); } /* The type-specifier must be a user-defined type. */ if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES)) { bool qualified_p; bool global_p; /* Don't gobble tokens or issue error messages if this is an optional type-specifier. */ if (flags & CP_PARSER_FLAGS_OPTIONAL) cp_parser_parse_tentatively (parser); /* Look for the optional `::' operator. */ global_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the nested-name specifier. */ qualified_p = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/false) != NULL_TREE); /* If we have seen a nested-name-specifier, and the next token is `template', then we are using the template-id production. */ if (parser->scope && cp_parser_optional_template_keyword (parser)) { /* Look for the template-id. */ type = cp_parser_template_id (parser, /*template_keyword_p=*/true, /*check_dependency_p=*/true, /*is_declaration=*/false); /* If the template-id did not name a type, we are out of luck. */ if (TREE_CODE (type) != TYPE_DECL) { cp_parser_error (parser, "expected template-id for type"); type = NULL_TREE; } } /* Otherwise, look for a type-name. */ else type = cp_parser_type_name (parser); /* Keep track of all name-lookups performed in class scopes. */ if (type && !global_p && !qualified_p && TREE_CODE (type) == TYPE_DECL && TREE_CODE (DECL_NAME (type)) == IDENTIFIER_NODE) maybe_note_name_used_in_class (DECL_NAME (type), type); /* If it didn't work out, we don't have a TYPE. */ if ((flags & CP_PARSER_FLAGS_OPTIONAL) && !cp_parser_parse_definitely (parser)) type = NULL_TREE; if (type && decl_specs) cp_parser_set_decl_spec_type (decl_specs, type, /*user_defined=*/true); } /* If we didn't get a type-name, issue an error message. 
*/ if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL)) { cp_parser_error (parser, "expected type-name"); return error_mark_node; } /* There is no valid C++ program where a non-template type is followed by a "<". That usually indicates that the user thought that the type was a template. */ if (type && type != error_mark_node) { /* As a last-ditch effort, see if TYPE is an Objective-C type. If it is, then the '<'...'>' enclose protocol names rather than template arguments, and so everything is fine. */ /* APPLE LOCAL radar 4516785 */ if (c_dialect_objc () && !parser->scope && (objc_is_id (type) || objc_is_class_name (type))) { tree protos = cp_parser_objc_protocol_refs_opt (parser); tree qual_type = objc_get_protocol_qualified_type (type, protos); /* Clobber the "unqualified" type previously entered into DECL_SPECS with the new, improved protocol-qualified version. */ if (decl_specs) decl_specs->type = qual_type; return qual_type; } cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type)); } return type; } /* Parse a type-name. type-name: class-name enum-name typedef-name enum-name: identifier typedef-name: identifier Returns a TYPE_DECL for the type. */ static tree cp_parser_type_name (cp_parser* parser) { tree type_decl; tree identifier; /* We can't know yet whether it is a class-name or not. */ cp_parser_parse_tentatively (parser); /* Try a class-name. */ type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, none_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/false); /* If it's not a class-name, keep looking. */ if (!cp_parser_parse_definitely (parser)) { /* It must be a typedef-name or an enum-name. */ identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return error_mark_node; /* Look up the type-name. 
*/ type_decl = cp_parser_lookup_name_simple (parser, identifier); if (TREE_CODE (type_decl) != TYPE_DECL && (objc_is_id (identifier) || objc_is_class_name (identifier))) { /* See if this is an Objective-C type. */ /* APPLE LOCAL begin radar 5355344 */ tree protos; if (cp_parser_objc_tentative_protocol_refs_opt (parser, &protos)) { tree type = objc_get_protocol_qualified_type (identifier, protos); if (type) type_decl = TYPE_NAME (type); } /* APPLE LOCAL end radar 5355344 */ } /* Issue an error if we did not find a type-name. */ /* APPLE LOCAL begin radar 5277239 */ if (TREE_CODE (type_decl) != TYPE_DECL || cp_objc_property_reference_prefix (parser, TREE_TYPE (type_decl))) /* APPLE LOCAL end radar 5277239 */ { if (!cp_parser_simulate_error (parser)) cp_parser_name_lookup_error (parser, identifier, type_decl, "is not a type"); type_decl = error_mark_node; } /* Remember that the name was used in the definition of the current class so that we can check later to see if the meaning would have been different after the class was entirely defined. */ else if (type_decl != error_mark_node && !parser->scope) maybe_note_name_used_in_class (identifier, type_decl); } return type_decl; } /* Parse an elaborated-type-specifier. Note that the grammar given here incorporates the resolution to DR68. elaborated-type-specifier: class-key :: [opt] nested-name-specifier [opt] identifier class-key :: [opt] nested-name-specifier [opt] template [opt] template-id enum :: [opt] nested-name-specifier [opt] identifier typename :: [opt] nested-name-specifier identifier typename :: [opt] nested-name-specifier template [opt] template-id GNU extension: elaborated-type-specifier: class-key attributes :: [opt] nested-name-specifier [opt] identifier class-key attributes :: [opt] nested-name-specifier [opt] template [opt] template-id enum attributes :: [opt] nested-name-specifier [opt] identifier If IS_FRIEND is TRUE, then this elaborated-type-specifier is being declared `friend'. 
   If IS_DECLARATION is TRUE, then this elaborated-type-specifier
   appears in a decl-specifiers-seq, i.e., something is being
   declared.

   Returns the TYPE specified.  */

static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
                                     bool is_friend,
                                     bool is_declaration)
{
  enum tag_types tag_type;
  tree identifier;
  tree type = NULL_TREE;
  tree attributes = NULL_TREE;

  /* See if we're looking at the `enum' keyword.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
    {
      /* Consume the `enum' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's an enumeration type.  */
      tag_type = enum_type;
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Or, it might be `typename'.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
                                           RID_TYPENAME))
    {
      /* Consume the `typename' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's a `typename' type.  */
      tag_type = typename_type;
      /* The `typename' keyword is only allowed in templates.  */
      if (!processing_template_decl)
        pedwarn ("using %<typename%> outside of template");
    }
  /* Otherwise it must be a class-key.  */
  else
    {
      tag_type = cp_parser_class_key (parser);
      if (tag_type == none_type)
        return error_mark_node;
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }

  /* Look for the `::' operator.  */
  cp_parser_global_scope_opt (parser,
                              /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  if (tag_type == typename_type)
    {
      if (!cp_parser_nested_name_specifier (parser,
                                            /*typename_keyword_p=*/true,
                                            /*check_dependency_p=*/true,
                                            /*type_p=*/true,
                                            is_declaration))
        return error_mark_node;
    }
  else
    /* Even though `typename' is not present, the proposed resolution
       to Core Issue 180 says that in `class A<T>::B', `B' should be
       considered a type-name, even if `A<T>' is dependent.  */
    cp_parser_nested_name_specifier_opt (parser,
                                         /*typename_keyword_p=*/true,
                                         /*check_dependency_p=*/true,
                                         /*type_p=*/true,
                                         is_declaration);
  /* For everything but enumeration types, consider a template-id.
     For an enumeration type, consider only a plain identifier.  */
  if (tag_type != enum_type)
    {
      bool template_p = false;
      tree decl;

      /* Allow the `template' keyword.  */
      template_p = cp_parser_optional_template_keyword (parser);
      /* If we didn't see `template', we don't know if there's a
         template-id or not.  */
      if (!template_p)
        cp_parser_parse_tentatively (parser);
      /* Parse the template-id.  */
      decl = cp_parser_template_id (parser, template_p,
                                    /*check_dependency_p=*/true,
                                    is_declaration);
      /* If we didn't find a template-id, look for an ordinary
         identifier.  */
      if (!template_p && !cp_parser_parse_definitely (parser))
        ;
      /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
         in effect, then we must assume that, upon instantiation, the
         template will correspond to a class.  */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
               && tag_type == typename_type)
        type = make_typename_type (parser->scope, decl,
                                   typename_type,
                                   /*complain=*/tf_error);
      else
        type = TREE_TYPE (decl);
    }

  if (!type)
    {
      identifier = cp_parser_identifier (parser);

      if (identifier == error_mark_node)
        {
          parser->scope = NULL_TREE;
          return error_mark_node;
        }

      /* For a `typename', we needn't call xref_tag.  */
      if (tag_type == typename_type
          && TREE_CODE (parser->scope) != NAMESPACE_DECL)
        return cp_parser_make_typename_type (parser, parser->scope,
                                             identifier);
      /* Look up a qualified name in the usual way.  */
      if (parser->scope)
        {
          tree decl;
          /* LLVM LOCAL begin mainline */
          tree ambiguous_decls;
          /* LLVM LOCAL end mainline */

          decl = cp_parser_lookup_name (parser, identifier,
                                        tag_type,
                                        /*is_template=*/false,
                                        /*is_namespace=*/false,
                                        /*check_dependency=*/true,
                                        /* LLVM LOCAL begin mainline */
                                        &ambiguous_decls);
          /* LLVM LOCAL end mainline */

          /* LLVM LOCAL begin mainline */
          /* If the lookup was ambiguous, an error will already have been
             issued.  */
          if (ambiguous_decls)
            return error_mark_node;
          /* LLVM LOCAL end mainline */

          /* If we are parsing friend declaration, DECL may be a
             TEMPLATE_DECL tree node here.  However, we need to check
             whether this TEMPLATE_DECL results in valid code.  Consider
             the following example:

               namespace N {
                 template <class T> class C {};
               }
               class X {
                 template <class T> friend class N::C; // #1, valid code
               };
               template <class T> class Y {
                 friend class N::C;                    // #2, invalid code
               };

             For both case #1 and #2, we arrive at a TEMPLATE_DECL after
             name lookup of `N::C'.  We see that friend declaration must
             be template for the code to be valid.  Note that
             processing_template_decl does not work here since it is
             always 1 for the above two cases.  */

          decl = (cp_parser_maybe_treat_template_as_class
                  (decl, /*tag_name_p=*/is_friend
                         && parser->num_template_parameter_lists));

          if (TREE_CODE (decl) != TYPE_DECL)
            {
              cp_parser_diagnose_invalid_type_name (parser,
                                                    parser->scope,
                                                    identifier);
              return error_mark_node;
            }

          if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
            {
              bool allow_template = (parser->num_template_parameter_lists
                                      || DECL_SELF_REFERENCE_P (decl));
              /* NOTE(review): the TYPE returned here is used only for its
                 error check; on success it is immediately overwritten by
                 TREE_TYPE (decl) below.  */
              type = check_elaborated_type_specifier (tag_type, decl,
                                                      allow_template);

              if (type == error_mark_node)
                return error_mark_node;
            }

          type = TREE_TYPE (decl);
        }
      else
        {
          /* An elaborated-type-specifier sometimes introduces a new type
             and sometimes names an existing type.  Normally, the rule is
             that it introduces a new type only if there is not an
             existing type of the same name already in scope.  For
             example, given:

               struct S {};
               void f() { struct S s; }

             the `struct S' in the body of `f' is the same `struct S' as
             in the global scope; the existing definition is used.
             However, if there were no global declaration, this would
             introduce a new local class named `S'.

             An exception to this rule applies to the following code:

               namespace N { struct S; }

             Here, the elaborated-type-specifier names a new type
             unconditionally; even if there is already an `S' in the
             containing scope this declaration names a new type.
             This exception only applies if the elaborated-type-specifier
             forms the complete declaration:

               [class.name]

               A declaration consisting solely of `class-key identifier ;'
               is either a redeclaration of the name in the current scope
               or a forward declaration of the identifier as a class name.
               It introduces the name into the current scope.

             We are in this situation precisely when the next token is a `;'.

             An exception to the exception is that a `friend' declaration
             does *not* name a new type; i.e., given:

               struct S { friend struct T; };

             `T' is not a new type in the scope of `S'.

             Also, `new struct S' or `sizeof (struct S)' never results in the
             definition of a new type; a new type can only be declared in a
             declaration context.  */

          tag_scope ts;
          bool template_p;

          if (is_friend)
            /* Friends have special name lookup rules.  */
            ts = ts_within_enclosing_non_class;
          else if (is_declaration
                   && cp_lexer_next_token_is (parser->lexer,
                                              CPP_SEMICOLON))
            /* This is a `class-key identifier ;' */
            ts = ts_current;
          else
            ts = ts_global;

          template_p =
            (parser->num_template_parameter_lists
             && (cp_parser_next_token_starts_class_definition_p (parser)
                 || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
          /* An unqualified name was used to reference this type, so
             there were no qualifying templates.  */
          if (!cp_parser_check_template_parameters (parser,
                                                    /*num_templates=*/0))
            return error_mark_node;
          type = xref_tag (tag_type, identifier, ts, template_p);
        }
    }

  if (type == error_mark_node)
    return error_mark_node;

  /* Allow attributes on forward declarations of classes.  */
  if (attributes)
    {
      if (TREE_CODE (type) == TYPENAME_TYPE)
        warning (OPT_Wattributes,
                 "attributes ignored on uninstantiated type");
      else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
               && ! processing_explicit_instantiation)
        warning (OPT_Wattributes,
                 "attributes ignored on template instantiation");
      else if (is_declaration && cp_parser_declares_only_class_p (parser))
        cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
      else
        warning (OPT_Wattributes,
                 "attributes ignored on elaborated-type-specifier that is not a forward declaration");
    }

  if (tag_type != enum_type)
    cp_parser_check_class_key (tag_type, type);

  /* A "<" cannot follow an elaborated type specifier.  If that
     happens, the user was probably trying to form a template-id.  */
  cp_parser_check_for_invalid_template_id (parser, type);

  return type;
}

/* Parse an enum-specifier.

   enum-specifier:
     enum identifier [opt] { enumerator-list [opt] }

   GNU Extensions:
     enum attributes[opt] identifier [opt] { enumerator-list [opt] }
       attributes[opt]

   Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
   if the token stream isn't an enum-specifier after all.  */

static tree
cp_parser_enum_specifier (cp_parser* parser)
{
  tree identifier;
  tree type;
  tree attributes;

  /* Parse tentatively so that we can back up if we don't find a
     enum-specifier.  */
  cp_parser_parse_tentatively (parser);

  /* Caller guarantees that the current token is 'enum', an identifier
     possibly follows, and the token after that is an opening brace.
     If we don't have an identifier, fabricate an anonymous name for
     the enumeration being defined.
     */
  cp_lexer_consume_token (parser->lexer);

  attributes = cp_parser_attributes_opt (parser);

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    identifier = cp_parser_identifier (parser);
  else
    identifier = make_anon_name ();

  /* Look for the `{' but don't consume it yet.  The tentative parse
     is resolved before the brace is consumed, so a missing `{' lets
     the caller back up cleanly.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    cp_parser_simulate_error (parser);

  if (!cp_parser_parse_definitely (parser))
    return NULL_TREE;

  /* Issue an error message if type-definitions are forbidden here.  */
  if (!cp_parser_check_type_definition (parser))
    type = error_mark_node;
  else
    /* Create the new type.  We do this before consuming the opening
       brace so the enum will be recorded as being on the line of its
       tag (or the 'enum' keyword, if there is no tag).  */
    type = start_enum (identifier);

  /* Consume the opening brace.  */
  cp_lexer_consume_token (parser->lexer);

  if (type == error_mark_node)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* If the next token is not '}', then there are some enumerators.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
    cp_parser_enumerator_list (parser, type);

  /* Consume the final '}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");

  /* Look for trailing attributes to apply to this enumeration, and
     apply them if appropriate.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      tree trailing_attr = cp_parser_attributes_opt (parser);
      cplus_decl_attributes (&type,
                             trailing_attr,
                             (int) ATTR_FLAG_TYPE_IN_PLACE);
    }

  /* Finish up the enumeration.  */
  finish_enum (type);

  return type;
}

/* Parse an enumerator-list.  The enumerators all have the indicated
   TYPE.

   enumerator-list:
     enumerator-definition
     enumerator-list , enumerator-definition  */

static void
cp_parser_enumerator_list (cp_parser* parser, tree type)
{
  while (true)
    {
      /* Parse an enumerator-definition.  */
      cp_parser_enumerator_definition (parser, type);

      /* If the next token is not a ',', we've reached the end of
         the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        break;
      /* Otherwise, consume the `,' and keep going.  */
      cp_lexer_consume_token (parser->lexer);
      /* If the next token is a `}', there is a trailing comma.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        {
          if (pedantic && !in_system_header)
            pedwarn ("comma at end of enumerator list");
          break;
        }
    }
}

/* Parse an enumerator-definition.  The enumerator has the indicated
   TYPE.

   enumerator-definition:
     enumerator
     enumerator = constant-expression

   enumerator:
     identifier  */

static void
cp_parser_enumerator_definition (cp_parser* parser, tree type)
{
  tree identifier;
  tree value;

  /* Look for the identifier.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;

  /* If the next token is an '=', then there is an explicit value.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Consume the `=' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the value.  */
      value = cp_parser_constant_expression (parser,
                                             /*allow_non_constant_p=*/false,
                                             NULL);
    }
  else
    value = NULL_TREE;

  /* Create the enumerator.  */
  build_enumerator (identifier, value, type);
}

/* Parse a namespace-name.

   namespace-name:
     original-namespace-name
     namespace-alias

   Returns the NAMESPACE_DECL for the namespace.  */

static tree
cp_parser_namespace_name (cp_parser* parser)
{
  tree identifier;
  tree namespace_decl;

  /* Get the name of the namespace.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the identifier in the currently active scope.  Look only
     for namespaces, due to:

       [basic.lookup.udir]

       When looking up a namespace-name in a using-directive or alias
       definition, only namespace names are considered.

     And:

       [basic.lookup.qual]

       During the lookup of a name preceding the :: scope resolution
       operator, object, function, and enumerator names are ignored.
     (Note that cp_parser_class_or_namespace_name only calls this
     function if the token after the name is the scope resolution
     operator.)  */
  namespace_decl = cp_parser_lookup_name (parser, identifier,
                                          none_type,
                                          /*is_template=*/false,
                                          /*is_namespace=*/true,
                                          /*check_dependency=*/true,
                                          /*ambiguous_decls=*/NULL);
  /* If it's not a namespace, issue an error.  */
  if (namespace_decl == error_mark_node
      || TREE_CODE (namespace_decl) != NAMESPACE_DECL)
    {
      if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
        error ("%qD is not a namespace-name", identifier);
      cp_parser_error (parser, "expected namespace-name");
      namespace_decl = error_mark_node;
    }

  return namespace_decl;
}

/* Parse a namespace-definition.

   namespace-definition:
     named-namespace-definition
     unnamed-namespace-definition

   named-namespace-definition:
     original-namespace-definition
     extension-namespace-definition

   original-namespace-definition:
     namespace identifier { namespace-body }

   extension-namespace-definition:
     namespace original-namespace-name { namespace-body }

   unnamed-namespace-definition:
     namespace { namespace-body } */

static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree identifier, attribs;
  /* APPLE LOCAL visibility 5805832 */
  bool visibility_pushed = false;

  /* Look for the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, "`namespace'");

  /* Get the name of the namespace.  We do not attempt to distinguish
     between an original-namespace-definition and an
     extension-namespace-definition at this point.  The semantic
     analysis routines are responsible for that.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    identifier = cp_parser_identifier (parser);
  else
    identifier = NULL_TREE;

  /* Parse any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);

  /* Look for the `{' to start the namespace.  */
  cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
  /* Start the namespace.  */
  /* APPLE LOCAL visibility 5805832 */
  visibility_pushed = push_namespace_with_attribs (identifier, attribs);

  /* Parse the body of the namespace.  */
  cp_parser_namespace_body (parser);

  /* APPLE LOCAL begin visibility 5805832 */
  /* Any visibility pushed by the attributes above is popped before the
     namespace itself is finished.  */
#ifdef HANDLE_PRAGMA_VISIBILITY
  if (visibility_pushed)
    pop_visibility ();
#endif
  /* APPLE LOCAL end visibility 5805832 */

  /* Finish the namespace.  */
  pop_namespace ();
  /* Look for the final `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
}

/* Parse a namespace-body.

   namespace-body:
     declaration-seq [opt]  */

static void
cp_parser_namespace_body (cp_parser* parser)
{
  cp_parser_declaration_seq_opt (parser);
}

/* Parse a namespace-alias-definition.

   namespace-alias-definition:
     namespace identifier = qualified-namespace-specifier ;  */

static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  tree identifier;
  tree namespace_specifier;

  /* Look for the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, "`namespace'");
  /* Look for the identifier.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;
  /* Look for the `=' token.  */
  cp_parser_require (parser, CPP_EQ, "`='");
  /* Look for the qualified-namespace-specifier.  */
  namespace_specifier
    = cp_parser_qualified_namespace_specifier (parser);
  /* Look for the `;' token.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");

  /* Register the alias in the symbol table.  */
  do_namespace_alias (identifier, namespace_specifier);
}

/* Parse a qualified-namespace-specifier.

   qualified-namespace-specifier:
     :: [opt] nested-name-specifier [opt] namespace-name

   Returns a NAMESPACE_DECL corresponding to the specified
   namespace.  */

static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* Look for the optional `::'.  */
  cp_parser_global_scope_opt (parser,
                              /*current_scope_valid_p=*/false);

  /* Look for the optional nested-name-specifier.
     */
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/true);

  return cp_parser_namespace_name (parser);
}

/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
   access declaration.

   using-declaration:
     using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
     using :: unqualified-id ;

   access-declaration:
     qualified-id ;  */

static bool
cp_parser_using_declaration (cp_parser* parser, bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;

  /* An access-declaration has no leading `using', so it is parsed
     tentatively and committed only once the trailing `;' is seen.  */
  if (access_declaration_p)
    cp_parser_parse_tentatively (parser);
  else
    {
      /* Look for the `using' keyword.  */
      cp_parser_require_keyword (parser, RID_USING, "`using'");

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'.  */
      if (token->keyword == RID_TYPENAME)
        {
          /* Remember that we've seen it.  */
          typename_p = true;
          /* Consume the `typename' token.  */
          cp_lexer_consume_token (parser->lexer);
        }
    }

  /* Look for the optional global scope qualification.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
                                   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present.  */
  if (typename_p || !global_scope_p)
    qscope = cp_parser_nested_name_specifier (parser, typename_p,
                                              /*check_dependency_p=*/true,
                                              /*type_p=*/false,
                                              /*is_declaration=*/true);
  /* Otherwise, we could be in either of the two productions.  In that
     case, treat the nested-name-specifier as optional.  */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
                                                  /*typename_keyword_p=*/false,
                                                  /*check_dependency_p=*/true,
                                                  /*type_p=*/false,
                                                  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;

  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further.  Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required.  */
    return cp_parser_parse_definitely (parser);

  /* Parse the unqualified-id.  */
  identifier = cp_parser_unqualified_id (parser,
                                         /*template_keyword_p=*/false,
                                         /*check_dependency_p=*/true,
                                         /*declarator_p=*/true,
                                         /*optional_p=*/false);

  if (access_declaration_p)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
        return false;
    }

  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (TREE_CODE (identifier) != IDENTIFIER_NODE
           && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]

       A using declaration shall not name a template-id.  */
    error ("a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
        {
          /* Create the USING_DECL.  */
          decl = do_class_using_decl (parser->scope, identifier);
          /* Add it to the list of members in this class.  */
          finish_member_declaration (decl);
        }
      else
        {
          decl = cp_parser_lookup_name_simple (parser, identifier);
          if (decl == error_mark_node)
            cp_parser_name_lookup_error (parser, identifier, decl, NULL);
          else if (!at_namespace_scope_p ())
            do_local_using_decl (decl, qscope, identifier);
          else
            do_toplevel_using_decl (decl, qscope, identifier);
        }
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");

  return true;
}

/* Parse a using-directive.

   using-directive:
     using namespace :: [opt] nested-name-specifier [opt]
       namespace-name ;  */

static void
cp_parser_using_directive (cp_parser* parser)
{
  tree namespace_decl;
  tree attribs;

  /* Look for the `using' keyword.  */
  cp_parser_require_keyword (parser, RID_USING, "`using'");
  /* And the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, "`namespace'");
  /* Look for the optional `::' operator.
     */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* And the optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/true);
  /* Get the namespace being used.  */
  namespace_decl = cp_parser_namespace_name (parser);
  /* And any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);
  /* Update the symbol table.  */
  parse_using_directive (namespace_decl, attribs);
  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
}

/* Parse an asm-definition.

   asm-definition:
     asm ( string-literal ) ;
   APPLE LOCAL begin CW asm blocks
     asm { asm-line [opt] }
     asm asm-line
   APPLE LOCAL end CW asm blocks

   GNU Extension:

   asm-definition:
     asm volatile [opt] ( string-literal ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt]
                          : asm-operand-list [opt] ) ;
     asm volatile [opt] ( string-literal : asm-operand-list [opt]
                          : asm-operand-list [opt]
                          : asm-operand-list [opt] ) ;  */

static void
/* APPLE LOCAL CW asm blocks */
cp_parser_asm_definition (cp_parser* parser, bool statement_p ATTRIBUTE_UNUSED)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  /* APPLE LOCAL CW asm blocks */
  tree uses = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;
  /* APPLE LOCAL begin CW asm blocks */
  cp_token *nextup;
  /* Detect when a leading `asm' is actually a spec of an asm function
     rather than an asm statement or block.  The heuristic below peeks
     ahead at the token after `asm' and assumes an asm function unless
     the follower is one of the forms an asm statement/block can take.  */
  if (flag_iasm_blocks)
    {
      nextup = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (statement_p
          && nextup->u.value
          && IASM_SEE_OPCODE (TYPESPEC, nextup->u.value) == IDENTIFIER)
        {
          nextup->keyword = RID_MAX;
          nextup->type = CPP_NAME;
        }
      if (!((nextup->type == CPP_OPEN_PAREN)
            || (nextup->keyword == RID_VOLATILE
                && cp_lexer_peek_nth_token (parser->lexer, 3)->type
                   == CPP_OPEN_PAREN)
            || (nextup->type == CPP_OPEN_BRACE)
            || (nextup->type == CPP_ATSIGN)
            || (nextup->keyword == RID_ASM)
            || (nextup->type == CPP_DOT)
            || (nextup->type == CPP_SEMICOLON)
            || (nextup->flags & BOL)
            || (nextup->type == CPP_NAME
                && !iasm_typename_or_reserved (nextup->u.value))))
        {
          /* An asm function - we'll treat the `asm' as if it were a
             storage class spec, which will eventually affect function
             body parsing.  */
          cp_parser_simple_declaration (parser, true);
          return;
        }
    }
  /* APPLE LOCAL end CW asm blocks */

  /* Look for the `asm' keyword.  */
  cp_parser_require_keyword (parser, RID_ASM, "`asm'");
  /* See if the next token is `volatile'.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword.  */
      volatile_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* APPLE LOCAL begin CW asm blocks */
  /* A CW-style asm block is introduced by an open brace.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      if (flag_iasm_blocks)
        cp_parser_iasm_compound_statement (parser);
      else
        error ("asm blocks not enabled, use `-fasm-blocks'");
      return;
    }
  if (! volatile_p
      && (cp_lexer_next_token_is (parser->lexer, CPP_DOT)
          || cp_lexer_next_token_is (parser->lexer, CPP_ATSIGN)
          || cp_lexer_next_token_is (parser->lexer, CPP_NAME)
          || cp_lexer_next_token_is_keyword (parser->lexer, RID_ASM)
          || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
          || (cp_lexer_iasm_bol (parser->lexer)
              && ! cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))))
    {
      if (flag_iasm_blocks)
        cp_parser_iasm_top_statement (parser);
      else
        error ("asm blocks not enabled, use `-fasm-blocks'");
      return;
    }
  /* APPLE LOCAL end CW asm blocks */
  /* Look for the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return;
  /* Look for the string.  */
  string = cp_parser_string_literal (parser, false, false);
  if (string == error_mark_node)
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                             /*consume_paren=*/true);
      return;
    }

  /* If we're allowing GNU extensions, check for the extended assembly
     syntax.  Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too.  Doing that means that we have to treat the `::' operator as
     two `:' tokens.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;

      /* The extended syntax was used.  */
      extended_p = true;

      /* Look for outputs.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the output-operands.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_SCOPE)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN))
            outputs = cp_parser_asm_operand_list (parser);
        }
      /* If the next token is `::', there are no outputs, and the
         next token is the beginning of the inputs.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The inputs are coming next.  */
        inputs_p = true;

      /* Look for inputs.  */
      if (inputs_p
          || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the output-operands.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_COLON)
              && cp_lexer_next_token_is_not (parser->lexer,
                                             CPP_CLOSE_PAREN))
            inputs = cp_parser_asm_operand_list (parser);
        }
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
        /* The clobbers are coming next.  */
        clobbers_p = true;

      /* Look for clobbers.  */
      if (clobbers_p
          || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* Consume the `:' or `::'.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the clobbers.  */
          if (cp_lexer_next_token_is_not (parser->lexer,
                                          CPP_CLOSE_PAREN))
            clobbers = cp_parser_asm_clobber_list (parser);
        }
    }
  /* Look for the closing `)'.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
                                           /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");

  /* Create the ASM_EXPR.  */
  if (parser->in_function_body)
    {
      asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
                                  /* APPLE LOCAL CW asm blocks */
                                  inputs, clobbers, uses);
      /* If the extended syntax was not used, mark the ASM_EXPR.  */
      if (!extended_p)
        {
          tree temp = asm_stmt;
          if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
            temp = TREE_OPERAND (temp, 0);

          ASM_INPUT_P (temp) = 1;
        }
    }
  else
    cgraph_add_asm_node (string);
}

/* Declarators [gram.dcl.decl] */

/* Parse an init-declarator.

   init-declarator:
     declarator initializer [opt]

   GNU Extension:

   init-declarator:
     declarator asm-specification [opt] attributes [opt] initializer [opt]

   function-definition:
     decl-specifier-seq [opt] declarator ctor-initializer [opt]
       function-body
     decl-specifier-seq [opt] declarator function-try-block

   GNU Extension:

   function-definition:
     __extension__ function-definition

   The DECL_SPECIFIERS apply to this declarator.  Returns a
   representation of the entity declared.  If MEMBER_P is TRUE, then
   this declarator appears in a class scope.  The new DECL created by
   this declarator is returned.
The CHECKS are access checks that should be performed once we know what entity is being declared (and, therefore, what classes have befriended it). If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and for a function-definition here as well. If the declarator is a declarator for a function-definition, *FUNCTION_DEFINITION_P will be TRUE upon return. By that point, the function-definition will have been completely parsed. FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P is FALSE. */ static tree cp_parser_init_declarator (cp_parser* parser, cp_decl_specifier_seq *decl_specifiers, VEC (deferred_access_check,gc)* checks, bool function_definition_allowed_p, bool member_p, int declares_class_or_enum, bool* function_definition_p) { cp_token *token; cp_declarator *declarator; tree prefix_attributes; tree attributes; tree asm_specification; tree initializer; tree decl = NULL_TREE; tree scope; bool is_initialized; /* Only valid if IS_INITIALIZED is true. In that case, CPP_EQ if initialized with "= ..", CPP_OPEN_PAREN if initialized with "(...)". */ enum cpp_ttype initialization_kind; bool is_parenthesized_init = false; bool is_non_constant_init; int ctor_dtor_or_conv_p; bool friend_p; tree pushed_scope = NULL; /* Gather the attributes that were provided with the decl-specifiers. */ prefix_attributes = decl_specifiers->attributes; /* Assume that this is not the declarator for a function definition. */ if (function_definition_p) *function_definition_p = false; /* Defer access checks while parsing the declarator; we cannot know what names are accessible until we know what is being declared. */ resume_deferring_access_checks (); /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, &ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, /*member_p=*/false); /* Gather up the deferred checks. */ stop_deferring_access_checks (); /* If the DECLARATOR was erroneous, there's no need to go further. 
*/ if (declarator == cp_error_declarator) return error_mark_node; /* Check that the number of template-parameter-lists is OK. */ if (!cp_parser_check_declarator_template_parameters (parser, declarator)) return error_mark_node; if (declares_class_or_enum & 2) cp_parser_check_for_definition_in_return_type (declarator, decl_specifiers->type); /* Figure out what scope the entity declared by the DECLARATOR is located in. `grokdeclarator' sometimes changes the scope, so we compute it now. */ scope = get_scope_of_declarator (declarator); /* If we're allowing GNU extensions, look for an asm-specification and attributes. */ if (cp_parser_allow_gnu_extensions_p (parser)) { /* Look for an asm-specification. */ asm_specification = cp_parser_asm_specification_opt (parser); /* And attributes. */ attributes = cp_parser_attributes_opt (parser); } else { asm_specification = NULL_TREE; attributes = NULL_TREE; } /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Check to see if the token indicates the start of a function-definition. */ if (cp_parser_token_starts_function_definition_p (token)) { if (!function_definition_allowed_p) { /* If a function-definition should not appear here, issue an error message. */ cp_parser_error (parser, "a function-definition is not allowed here"); return error_mark_node; } else { /* Neither attributes nor an asm-specification are allowed on a function-definition. */ if (asm_specification) error ("an asm-specification is not allowed on a function-definition"); if (attributes) error ("attributes are not allowed on a function-definition"); /* This is a function-definition. */ *function_definition_p = true; /* Parse the function definition. 
*/ if (member_p) decl = cp_parser_save_member_function_body (parser, decl_specifiers, declarator, prefix_attributes); else decl = (cp_parser_function_definition_from_specifiers_and_declarator (parser, decl_specifiers, prefix_attributes, declarator)); return decl; } } /* [dcl.dcl] Only in function declarations for constructors, destructors, and type conversions can the decl-specifier-seq be omitted. We explicitly postpone this check past the point where we handle function-definitions because we tolerate function-definitions that are missing their return types in some modes. */ if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0) { cp_parser_error (parser, "expected constructor, destructor, or type conversion"); return error_mark_node; } /* An `=' or an `(' indicates an initializer. */ if (token->type == CPP_EQ || token->type == CPP_OPEN_PAREN) { is_initialized = true; initialization_kind = token->type; } else { /* If the init-declarator isn't initialized and isn't followed by a `,' or `;', it's not a valid init-declarator. */ if (token->type != CPP_COMMA && token->type != CPP_SEMICOLON) { cp_parser_error (parser, "expected initializer"); return error_mark_node; } is_initialized = false; initialization_kind = CPP_EOF; } /* Because start_decl has side-effects, we should only call it if we know we're going ahead. By this point, we know that we cannot possibly be looking at any other construct. */ cp_parser_commit_to_tentative_parse (parser); /* If the decl specifiers were bad, issue an error now that we're sure this was intended to be a declarator. Then continue declaring the variable(s), as int, to try to cut down on further errors. */ if (decl_specifiers->any_specifiers_p && decl_specifiers->type == error_mark_node) { cp_parser_error (parser, "invalid type in declaration"); decl_specifiers->type = integer_type_node; } /* Check to see whether or not this declaration is a friend. 
*/ friend_p = cp_parser_friend_p (decl_specifiers); /* Enter the newly declared entry in the symbol table. If we're processing a declaration in a class-specifier, we wait until after processing the initializer. */ if (!member_p) { if (parser->in_unbraced_linkage_specification_p) decl_specifiers->storage_class = sc_extern; decl = start_decl (declarator, decl_specifiers, is_initialized, attributes, prefix_attributes, &pushed_scope); } else if (scope) /* Enter the SCOPE. That way unqualified names appearing in the initializer will be looked up in SCOPE. */ pushed_scope = push_scope (scope); /* Perform deferred access control checks, now that we know in which SCOPE the declared entity resides. */ if (!member_p && decl) { tree saved_current_function_decl = NULL_TREE; /* If the entity being declared is a function, pretend that we are in its scope. If it is a `friend', it may have access to things that would not otherwise be accessible. */ if (TREE_CODE (decl) == FUNCTION_DECL) { saved_current_function_decl = current_function_decl; current_function_decl = decl; } /* Perform access checks for template parameters. */ cp_parser_perform_template_parameter_access_checks (checks); /* Perform the access control checks for the declarator and the the decl-specifiers. */ perform_deferred_access_checks (); /* Restore the saved value. */ if (TREE_CODE (decl) == FUNCTION_DECL) current_function_decl = saved_current_function_decl; } /* Parse the initializer. */ initializer = NULL_TREE; is_parenthesized_init = false; is_non_constant_init = true; if (is_initialized) { if (function_declarator_p (declarator)) { if (initialization_kind == CPP_EQ) initializer = cp_parser_pure_specifier (parser); else { /* If the declaration was erroneous, we don't really know what the user intended, so just silently consume the initializer. 
*/ if (decl != error_mark_node) error ("initializer provided for function"); cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); } } else initializer = cp_parser_initializer (parser, &is_parenthesized_init, &is_non_constant_init); } /* The old parser allows attributes to appear after a parenthesized initializer. Mark Mitchell proposed removing this functionality on the GCC mailing lists on 2002-08-13. This parser accepts the attributes -- but ignores them. */ if (cp_parser_allow_gnu_extensions_p (parser) && is_parenthesized_init) if (cp_parser_attributes_opt (parser)) warning (OPT_Wattributes, "attributes after parenthesized initializer ignored"); /* For an in-class declaration, use `grokfield' to create the declaration. */ if (member_p) { if (pushed_scope) { pop_scope (pushed_scope); pushed_scope = false; } decl = grokfield (declarator, decl_specifiers, initializer, !is_non_constant_init, /*asmspec=*/NULL_TREE, prefix_attributes); if (decl && TREE_CODE (decl) == FUNCTION_DECL) cp_parser_save_default_args (parser, decl); } /* Finish processing the declaration. But, skip friend declarations. */ if (!friend_p && decl && decl != error_mark_node) { cp_finish_decl (decl, initializer, !is_non_constant_init, asm_specification, /* If the initializer is in parentheses, then this is a direct-initialization, which means that an `explicit' constructor is OK. Otherwise, an `explicit' constructor cannot be used. */ ((is_parenthesized_init || !is_initialized) ? 0 : LOOKUP_ONLYCONVERTING)); } if (!friend_p && pushed_scope) pop_scope (pushed_scope); return decl; } /* APPLE LOCAL begin blocks 6040305 (cc) */ static cp_cv_quals cp_parser_cv_qualifier_or_attribute_seq_opt (cp_parser *parser, tree *attrs_p) { cp_cv_quals quals = TYPE_UNQUALIFIED; cp_cv_quals q; cp_token *token; *attrs_p = NULL_TREE; while (true) { /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Handle attributes. 
*/ if (token->keyword == RID_ATTRIBUTE) { /* Parse the attributes. */ *attrs_p = chainon (*attrs_p, cp_parser_attributes_opt (parser)); continue; } q = cp_parser_cv_qualifier_seq_opt (parser); if (q == TYPE_UNQUALIFIED) break; quals |= q; } return quals; } /* APPLE LOCAL end blocks 6040305 (cc) */ /* Parse a declarator. declarator: direct-declarator ptr-operator declarator abstract-declarator: ptr-operator abstract-declarator [opt] direct-abstract-declarator GNU Extensions: declarator: attributes [opt] direct-declarator attributes [opt] ptr-operator declarator abstract-declarator: attributes [opt] ptr-operator abstract-declarator [opt] attributes [opt] direct-abstract-declarator APPLE LOCAL begin blocks 6339747 block-declarator: attributes [opt] ptr-operator block-declarator [opt] attributes [opt] direct-block-declarator APPLE LOCAL end blocks 6339747 If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to detect constructor, destructor or conversion operators. It is set to -1 if the declarator is a name, and +1 if it is a function. Otherwise it is set to zero. Usually you just want to test for >0, but internally the negative value is used. (The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have a decl-specifier-seq unless it declares a constructor, destructor, or conversion. It might seem that we could check this condition in semantic analysis, rather than parsing, but that makes it difficult to handle something like `f()'. We want to notice that there are no decl-specifiers, and therefore realize that this is an expression, not a declaration.) If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff the declarator is a direct-declarator of the form "(...)". MEMBER_P is true iff this declarator is a member-declarator. 
*/

/* Parse a declarator (named, abstract, or either, per DCL_KIND); the
   grammar accepted is given in the comment immediately above.  Returns
   the new cp_declarator, or cp_error_declarator on a parse error.  */

static cp_declarator *
cp_parser_declarator (cp_parser* parser,
                      cp_parser_declarator_kind dcl_kind,
                      int* ctor_dtor_or_conv_p,
                      bool* parenthesized_p,
                      bool member_p)
{
  cp_token *token;
  cp_declarator *declarator;
  enum tree_code code;
  cp_cv_quals cv_quals;
  tree class_type;
  tree attributes = NULL_TREE;

  /* Assume this is not a constructor, destructor, or type-conversion
     operator.  */
  if (ctor_dtor_or_conv_p)
    *ctor_dtor_or_conv_p = 0;

  /* GNU extension: leading attributes are permitted on a declarator.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* APPLE LOCAL begin blocks 6040305 (cc) */
  /* A `^' introduces a block-pointer declarator (Apple blocks
     extension), parsed like a ptr-operator followed by a nested
     declarator.  */
  if (flag_blocks && token->type == CPP_XOR)
    {
      cp_cv_quals quals;
      cp_declarator *inner;
      tree attrs;
      cp_lexer_consume_token (parser->lexer);
      /* cp_parse_declspecs (parser, quals_attrs, false, false, true); */
      quals = cp_parser_cv_qualifier_or_attribute_seq_opt (parser, &attrs);
      inner = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
                                    /*ctor_dtor_or_conv_p=*/NULL,
                                    /*parenthesized_p=*/NULL,
                                    /*member_p=*/false);
      if (inner == cp_error_declarator)
        return inner;
      return make_block_pointer_declarator (attrs, quals, inner);
    }
  /* APPLE LOCAL end blocks 6040305 (cc) */

  /* Check for the ptr-operator production.  */
  cp_parser_parse_tentatively (parser);
  /* Parse the ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &class_type, &cv_quals);
  /* If that worked, then we have a ptr-operator.  */
  if (cp_parser_parse_definitely (parser))
    {
      /* If a ptr-operator was found, then this declarator was not
         parenthesized.
         NOTE(review): despite that comment, TRUE is stored here;
         *PARENTHESIZED_P is only consulted to inhibit committing a
         tentative parse in cp_parser_parameter_declaration_list, so
         TRUE is merely conservative -- confirm this is intended.  */
      if (parenthesized_p)
        *parenthesized_p = true;
      /* The dependent declarator is optional if we are parsing an
         abstract-declarator.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED)
        cp_parser_parse_tentatively (parser);

      /* Parse the dependent declarator.  */
      declarator = cp_parser_declarator (parser, dcl_kind,
                                         /*ctor_dtor_or_conv_p=*/NULL,
                                         /*parenthesized_p=*/NULL,
                                         /*member_p=*/false);

      /* If we are parsing an abstract-declarator, we must handle the
         case where the dependent declarator is absent.  */
      if (dcl_kind != CP_PARSER_DECLARATOR_NAMED
          && !cp_parser_parse_definitely (parser))
        declarator = NULL;

      /* Build the representation of the ptr-operator.  */
      if (class_type)
        declarator = make_ptrmem_declarator (cv_quals,
                                             class_type,
                                             declarator);
      else if (code == INDIRECT_REF)
        declarator = make_pointer_declarator (cv_quals, declarator);
      else
        declarator = make_reference_declarator (cv_quals, declarator);
    }
  /* Everything else is a direct-declarator.  */
  else
    {
      if (parenthesized_p)
        *parenthesized_p = cp_lexer_next_token_is (parser->lexer,
                                                   CPP_OPEN_PAREN);
      declarator = cp_parser_direct_declarator (parser, dcl_kind,
                                                ctor_dtor_or_conv_p,
                                                member_p);
    }

  /* Attach any leading GNU attributes to a successfully parsed
     declarator.  */
  if (attributes && declarator && declarator != cp_error_declarator)
    declarator->attributes = attributes;

  return declarator;
}

/* Parse a direct-declarator or direct-abstract-declarator.

   direct-declarator:
     declarator-id
     direct-declarator ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-declarator [ constant-expression [opt] ]
     ( declarator )

   direct-abstract-declarator:
     direct-abstract-declarator [opt]
       ( parameter-declaration-clause )
       cv-qualifier-seq [opt]
       exception-specification [opt]
     direct-abstract-declarator [opt] [ constant-expression [opt] ]
     ( abstract-declarator )

   APPLE LOCAL begin blocks 6339747
   GNU Extensions:

   direct-block-declarator:
     direct-block-declarator [opt]
       ( parameter-declaration-clause ) [opt]
       exception-specification [opt]
     direct-block-declarator [opt] [ constant-expression [opt] ]
     ( block-declarator )
   APPLE LOCAL end blocks 6339747

   Returns a representation of the declarator.  DCL_KIND is
   CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
   direct-abstract-declarator.
It is CP_PARSER_DECLARATOR_NAMED, if we are parsing a
   direct-declarator.  It is CP_PARSER_DECLARATOR_EITHER, if we can
   accept either - in the case of ambiguity we prefer an abstract
   declarator, as per [dcl.ambig.res].  CTOR_DTOR_OR_CONV_P and
   MEMBER_P are as for cp_parser_declarator.  */

static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
                             cp_parser_declarator_kind dcl_kind,
                             int* ctor_dtor_or_conv_p,
                             bool member_p)
{
  cp_token *token;
  cp_declarator *declarator = NULL;
  tree scope = NULL_TREE;
  /* Parser state is saved here and restored before returning, since
     the loop below mutates it while parsing nested constructs.  */
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  bool saved_in_declarator_p = parser->in_declarator_p;
  bool first = true;
  tree pushed_scope = NULL_TREE;

  /* Each iteration consumes one declarator component: a parameter
     list, an array bound, a parenthesized declarator, or (once) the
     declarator-id itself.  */
  while (true)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
        {
          /* This is either a parameter-declaration-clause, or a
             parenthesized declarator.  When we know we are parsing a
             named declarator, it must be a parenthesized declarator
             if FIRST is true.  For instance, `(int)' is a
             parameter-declaration-clause, with an omitted
             direct-abstract-declarator.  But `((*))', is a
             parenthesized abstract declarator.  Finally, when T is a
             template parameter `(T)' is a
             parameter-declaration-clause, and not a parenthesized
             named declarator.

             We first try and parse a parameter-declaration-clause,
             and then try a nested declarator (if FIRST is true).

             It is not an error for it not to be a
             parameter-declaration-clause, even when FIRST is false.
             Consider,

               int i (int);
               int i (3);

             The first is the declaration of a function while the
             second is the definition of a variable, including its
             initializer.

             Having seen only the parenthesis, we cannot know which of
             these two alternatives should be selected.  Even more
             complex are examples like:

               int i (int (a));
               int i (int (3));

             The former is a function-declaration; the latter is a
             variable initialization.

             Thus again, we try a parameter-declaration-clause, and if
             that fails, we back out and return.  */
          if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
            {
              cp_parameter_declarator *params;
              unsigned saved_num_template_parameter_lists;

              /* In a member-declarator, the only valid interpretation
                 of a parenthesis is the start of a
                 parameter-declaration-clause.  (It is invalid to
                 initialize a static data member with a parenthesized
                 initializer; only the "=" form of initialization is
                 permitted.)  */
              if (!member_p)
                cp_parser_parse_tentatively (parser);

              /* Consume the `('.  */
              cp_lexer_consume_token (parser->lexer);
              if (first)
                {
                  /* If this is going to be an abstract declarator, we're
                     in a declarator and we can't have default args.  */
                  parser->default_arg_ok_p = false;
                  parser->in_declarator_p = true;
                }

              /* Inside the function parameter list, surrounding
                 template-parameter-lists do not apply.  */
              saved_num_template_parameter_lists
                = parser->num_template_parameter_lists;
              parser->num_template_parameter_lists = 0;

              /* Parse the parameter-declaration-clause.  */
              params = cp_parser_parameter_declaration_clause (parser);

              parser->num_template_parameter_lists
                = saved_num_template_parameter_lists;

              /* If all went well, parse the cv-qualifier-seq and the
                 exception-specification.  */
              if (member_p || cp_parser_parse_definitely (parser))
                {
                  cp_cv_quals cv_quals;
                  tree exception_specification;

                  if (ctor_dtor_or_conv_p)
                    *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
                  first = false;
                  /* Consume the `)'.  */
                  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

                  /* APPLE LOCAL begin blocks 6339747 */
                  /* NOTE(review): the abstract/named checks elsewhere
                     in this function use CP_PARSER_DECLARATOR_BLOCK;
                     confirm BLOCKDEF is an alias for it -- its
                     declaration is not in this portion of the file.  */
                  if (dcl_kind != BLOCKDEF)
                    {
                      /* Parse the cv-qualifier-seq.  */
                      cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
                    }
                  else
                    cv_quals = TYPE_UNQUALIFIED;
                  /* APPLE LOCAL end blocks 6339747 */
                  /* And the exception-specification.  */
                  exception_specification
                    = cp_parser_exception_specification_opt (parser);

                  /* Create the function-declarator.  */
                  declarator = make_call_declarator (declarator,
                                                     params,
                                                     cv_quals,
                                                     exception_specification);
                  /* Any subsequent parameter lists are to do with
                     return type, so are not those of the declared
                     function.  */
                  parser->default_arg_ok_p = false;

                  /* Repeat the main loop.  */
                  continue;
                }
            }

          /* If this is the first, we can try a parenthesized
             declarator.  */
          if (first)
            {
              bool saved_in_type_id_in_expr_p;

              parser->default_arg_ok_p = saved_default_arg_ok_p;
              parser->in_declarator_p = saved_in_declarator_p;

              /* Consume the `('.  */
              cp_lexer_consume_token (parser->lexer);
              /* Parse the nested declarator.  */
              saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
              parser->in_type_id_in_expr_p = true;
              declarator
                = cp_parser_declarator (parser, dcl_kind,
                                        ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        member_p);
              parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
              first = false;
              /* Expect a `)'.  */
              if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
                declarator = cp_error_declarator;
              if (declarator == cp_error_declarator)
                break;

              goto handle_declarator;
            }
          /* Otherwise, we must be done.  */
          else
            break;
        }
      else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
               && token->type == CPP_OPEN_SQUARE)
        {
          /* Parse an array-declarator.  */
          tree bounds;

          if (ctor_dtor_or_conv_p)
            *ctor_dtor_or_conv_p = 0;

          first = false;
          parser->default_arg_ok_p = false;
          parser->in_declarator_p = true;
          /* Consume the `['.  */
          cp_lexer_consume_token (parser->lexer);
          /* Peek at the next token.  */
          token = cp_lexer_peek_token (parser->lexer);
          /* If the next token is `]', then there is no
             constant-expression.  */
          if (token->type != CPP_CLOSE_SQUARE)
            {
              bool non_constant_p;

              bounds
                = cp_parser_constant_expression (parser,
                                                 /*allow_non_constant=*/true,
                                                 &non_constant_p);
              if (!non_constant_p)
                bounds = fold_non_dependent_expr (bounds);
              /* Normally, the array bound must be an integral constant
                 expression.  However, as an extension, we allow VLAs
                 in function scopes.  */
              else if (!parser->in_function_body)
                {
                  error ("array bound is not an integer constant");
                  bounds = error_mark_node;
                }
            }
          else
            bounds = NULL_TREE;
          /* Look for the closing `]'.  */
          if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'"))
            {
              declarator = cp_error_declarator;
              break;
            }

          declarator = make_array_declarator (declarator, bounds);
        }
      /* APPLE LOCAL begin blocks 6339747 */
      else if (first && (dcl_kind == CP_PARSER_DECLARATOR_NAMED
                         || dcl_kind == CP_PARSER_DECLARATOR_EITHER))
      /* APPLE LOCAL end blocks 6339747 */
        {
          tree qualifying_scope;
          tree unqualified_name;
          special_function_kind sfk;
          bool abstract_ok;

          /* Parse a declarator-id.  */
          abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
          if (abstract_ok)
            cp_parser_parse_tentatively (parser);
          unqualified_name
            = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
          qualifying_scope = parser->scope;
          if (abstract_ok)
            {
              if (!cp_parser_parse_definitely (parser))
                unqualified_name = error_mark_node;
              else if (unqualified_name
                       && (qualifying_scope
                           || (TREE_CODE (unqualified_name)
                               != IDENTIFIER_NODE)))
                {
                  cp_parser_error (parser, "expected unqualified-id");
                  unqualified_name = error_mark_node;
                }
            }

          if (!unqualified_name)
            return NULL;
          if (unqualified_name == error_mark_node)
            {
              declarator = cp_error_declarator;
              break;
            }

          if (qualifying_scope && at_namespace_scope_p ()
              && TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
            {
              /* In the declaration of a member of a template class
                 outside of the class itself, the SCOPE will sometimes
                 be a TYPENAME_TYPE.  For example, given:

                   template <typename T>
                   int S<T>::R::i = 3;

                 the SCOPE will be a TYPENAME_TYPE for `S<T>::R'.  In
                 this context, we must resolve S<T>::R to an ordinary
                 type, rather than a typename type.

                 The reason we normally avoid resolving TYPENAME_TYPEs
                 is that a specialization of `S' might render
                 `S<T>::R' not a type.  However, if `S' is
                 specialized, then this `i' will not be used, so there
                 is no harm in resolving the types here.  */
              tree type;

              /* Resolve the TYPENAME_TYPE.  */
              type = resolve_typename_type (qualifying_scope,
                                            /*only_current_p=*/false);
              /* If that failed, the declarator is invalid.  */
              if (type == error_mark_node)
                error ("%<%T::%D%> is not a type",
                       TYPE_CONTEXT (qualifying_scope),
                       TYPE_IDENTIFIER (qualifying_scope));
              qualifying_scope = type;
            }

          sfk = sfk_none;
          if (unqualified_name)
            {
              tree class_type;

              if (qualifying_scope
                  && CLASS_TYPE_P (qualifying_scope))
                class_type = qualifying_scope;
              else
                class_type = current_class_type;

              if (TREE_CODE (unqualified_name) == TYPE_DECL)
                {
                  tree name_type = TREE_TYPE (unqualified_name);
                  if (class_type && same_type_p (name_type, class_type))
                    {
                      if (qualifying_scope
                          && CLASSTYPE_USE_TEMPLATE (name_type))
                        {
                          error ("invalid use of constructor as a template");
                          inform ("use %<%T::%D%> instead of %<%T::%D%> to "
                                  "name the constructor in a qualified name",
                                  class_type,
                                  DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
                                  class_type, name_type);
                          declarator = cp_error_declarator;
                          break;
                        }
                      else
                        unqualified_name = constructor_name (class_type);
                    }
                  else
                    {
                      /* We do not attempt to print the declarator
                         here because we do not have enough
                         information about its original syntactic
                         form.  */
                      cp_parser_error (parser, "invalid declarator");
                      declarator = cp_error_declarator;
                      break;
                    }
                }

              if (class_type)
                {
                  if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
                    sfk = sfk_destructor;
                  else if (IDENTIFIER_TYPENAME_P (unqualified_name))
                    sfk = sfk_conversion;
                  else if (/* There's no way to declare a constructor
                              for an anonymous type, even if the type
                              got a name for linkage purposes.  */
                           !TYPE_WAS_ANONYMOUS (class_type)
                           && constructor_name_p (unqualified_name,
                                                  class_type))
                    {
                      unqualified_name = constructor_name (class_type);
                      sfk = sfk_constructor;
                    }

                  if (ctor_dtor_or_conv_p && sfk != sfk_none)
                    *ctor_dtor_or_conv_p = -1;
                }
            }
          declarator = make_id_declarator (qualifying_scope,
                                           unqualified_name,
                                           sfk);
          declarator->id_loc = token->location;

        handle_declarator:;
          scope = get_scope_of_declarator (declarator);
          if (scope)
            /* Any names that appear after the declarator-id for a
               member are looked up in the containing scope.  */
            pushed_scope = push_scope (scope);
          parser->in_declarator_p = true;
          if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
              || (declarator && declarator->kind == cdk_id))
            /* Default args are only allowed on function
               declarations.  */
            parser->default_arg_ok_p = saved_default_arg_ok_p;
          else
            parser->default_arg_ok_p = false;

          first = false;
        }
      /* We're done.  */
      else
        break;
    }

  /* For an abstract declarator, we might wind up with nothing at this
     point.  That's an error; the declarator is not optional.  */
  /* APPLE LOCAL blocks 6339747 */
  if (!declarator && dcl_kind != CP_PARSER_DECLARATOR_BLOCK)
    cp_parser_error (parser, "expected declarator");

  /* If we entered a scope, we must exit it now.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  parser->default_arg_ok_p = saved_default_arg_ok_p;
  parser->in_declarator_p = saved_in_declarator_p;

  return declarator;
}

/* Parse a ptr-operator.

   ptr-operator:
     * cv-qualifier-seq [opt]
     &
     :: [opt] nested-name-specifier * cv-qualifier-seq [opt]

   GNU Extension:

   ptr-operator:
     & cv-qualifier-seq [opt]

   APPLE LOCAL blocks 6040305 (cc)
   ^

   Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
   Returns ADDR_EXPR if a reference was used.  In the case of a
   pointer-to-member, *TYPE is filled in with the TYPE containing the
   member.  *CV_QUALS is filled in with the cv-qualifier-seq, or
   TYPE_UNQUALIFIED, if there are no cv-qualifiers.  Returns
   ERROR_MARK if an error occurred.
*/

static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
                        tree* type,
                        cp_cv_quals *cv_quals)
{
  enum tree_code code = ERROR_MARK;
  cp_token *token;

  /* Assume that it's not a pointer-to-member.  */
  *type = NULL_TREE;
  /* And that there are no cv-qualifiers.  */
  *cv_quals = TYPE_UNQUALIFIED;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `*' or `&' we have a pointer or reference.  */
  if (token->type == CPP_MULT || token->type == CPP_AND)
    {
      /* Remember which ptr-operator we were processing.  */
      code = (token->type == CPP_AND ? ADDR_EXPR : INDIRECT_REF);

      /* Consume the `*' or `&'.  */
      cp_lexer_consume_token (parser->lexer);

      /* A `*' can be followed by a cv-qualifier-seq, and so can a
         `&', if we are allowing GNU extensions.  (The only qualifier
         that can legally appear after `&' is `restrict', but that is
         enforced during semantic analysis.)  */
      if (code == INDIRECT_REF
          || cp_parser_allow_gnu_extensions_p (parser))
        *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
    }
  else
    {
      /* Try the pointer-to-member case.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the optional `::' operator.  */
      cp_parser_global_scope_opt (parser,
                                  /*current_scope_valid_p=*/false);
      /* Look for the nested-name specifier.  */
      cp_parser_nested_name_specifier (parser,
                                       /*typename_keyword_p=*/false,
                                       /*check_dependency_p=*/true,
                                       /*type_p=*/false,
                                       /*is_declaration=*/false);
      /* If we found it, and the next token is a `*', then we are
         indeed looking at a pointer-to-member operator.  */
      if (!cp_parser_error_occurred (parser)
          && cp_parser_require (parser, CPP_MULT, "`*'"))
        {
          /* Indicate that the `*' operator was used.  */
          code = INDIRECT_REF;

          if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
            error ("%qD is a namespace", parser->scope);
          else
            {
              /* The type of which the member is a member is given by
                 the current SCOPE.  */
              *type = parser->scope;
              /* The next name will not be qualified.  */
              parser->scope = NULL_TREE;
              parser->qualifying_scope = NULL_TREE;
              parser->object_scope = NULL_TREE;
              /* Look for the optional cv-qualifier-seq.  */
              *cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
            }
        }

      /* If that didn't work we don't have a ptr-operator.  */
      if (!cp_parser_parse_definitely (parser))
        cp_parser_error (parser, "expected ptr-operator");
    }

  return code;
}

/* Parse an (optional) cv-qualifier-seq.

   cv-qualifier-seq:
     cv-qualifier cv-qualifier-seq [opt]

   cv-qualifier:
     const
     volatile

   GNU Extension:

   cv-qualifier:
     __restrict__

   Returns a bitmask representing the cv-qualifiers.  */

static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals cv_quals = TYPE_UNQUALIFIED;

  /* Consume qualifiers until the next token is not a cv-qualifier
     keyword.  */
  while (true)
    {
      cp_token *token;
      cp_cv_quals cv_qualifier;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's a cv-qualifier.  */
      switch (token->keyword)
        {
        case RID_CONST:
          cv_qualifier = TYPE_QUAL_CONST;
          break;

        case RID_VOLATILE:
          cv_qualifier = TYPE_QUAL_VOLATILE;
          break;

        case RID_RESTRICT:
          cv_qualifier = TYPE_QUAL_RESTRICT;
          break;

        default:
          cv_qualifier = TYPE_UNQUALIFIED;
          break;
        }

      if (!cv_qualifier)
        break;

      /* A repeated qualifier is diagnosed and the duplicate token is
         purged so later tentative parses do not see it again.  */
      if (cv_quals & cv_qualifier)
        {
          error ("duplicate cv-qualifier");
          cp_lexer_purge_token (parser->lexer);
        }
      else
        {
          cp_lexer_consume_token (parser->lexer);
          cv_quals |= cv_qualifier;
        }
    }

  return cv_quals;
}

/* Parse a declarator-id.

   declarator-id:
     id-expression
     :: [opt] nested-name-specifier [opt] type-name

   In the `id-expression' case, the value returned is as for
   cp_parser_id_expression if the id-expression was an unqualified-id.
   If the id-expression was a qualified-id, then a SCOPE_REF is
   returned.  The first operand is the scope (either a NAMESPACE_DECL
   or TREE_TYPE), but the second is still just a representation of an
   unqualified-id.  */

static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  tree id;

  /* The expression must be an id-expression.
Assume that qualified names are the names of types so that:

     template <class T>
     int S<T>::R::i = 3;

   will work; we must treat `S<T>::R' as the name of a type.
   Similarly, assume that qualified names are templates, where
   required, so that:

     template <class T>
     int S<T>::R<T>::i = 3;

   will work, too.  */
  id = cp_parser_id_expression (parser,
                                /*template_keyword_p=*/false,
                                /*check_dependency_p=*/false,
                                /*template_p=*/NULL,
                                /*declarator_p=*/true,
                                optional_p);
  /* For a qualified name that resolved to member functions, return
     the underlying functions rather than the BASELINK wrapper.  */
  if (id && BASELINK_P (id))
    id = BASELINK_FUNCTIONS (id);
  return id;
}

/* Parse a type-id.

   type-id:
     type-specifier-seq abstract-declarator [opt]

   Returns the TYPE specified.  */

static tree
cp_parser_type_id (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *abstract_declarator;

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
                                &type_specifier_seq);
  if (type_specifier_seq.type == error_mark_node)
    return error_mark_node;

  /* There might or might not be an abstract declarator.  */
  cp_parser_parse_tentatively (parser);
  /* Look for the declarator.  */
  abstract_declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
                            /*parenthesized_p=*/NULL,
                            /*member_p=*/false);
  /* Check to see if there really was a declarator.  */
  if (!cp_parser_parse_definitely (parser))
    abstract_declarator = NULL;

  return groktypename (&type_specifier_seq, abstract_declarator);
}

/* Parse a type-specifier-seq.

   type-specifier-seq:
     type-specifier type-specifier-seq [opt]

   GNU extension:

   type-specifier-seq:
     attributes type-specifier-seq [opt]

   If IS_CONDITION is true, we are at the start of a "condition",
   e.g., we've just seen "if (".

   Sets *TYPE_SPECIFIER_SEQ to represent the sequence.  */

static void
cp_parser_type_specifier_seq (cp_parser* parser,
                              bool is_condition,
                              cp_decl_specifier_seq *type_specifier_seq)
{
  bool seen_type_specifier = false;
  cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;

  /* Clear the TYPE_SPECIFIER_SEQ.  */
  clear_decl_specs (type_specifier_seq);

  /* Parse the type-specifiers and attributes.  */
  while (true)
    {
      tree type_specifier;
      bool is_cv_qualifier;

      /* Check for attributes first.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
        {
          type_specifier_seq->attributes
            = chainon (type_specifier_seq->attributes,
                       cp_parser_attributes_opt (parser));
          continue;
        }

      /* Look for the type-specifier.  */
      type_specifier = cp_parser_type_specifier (parser,
                                                 flags,
                                                 type_specifier_seq,
                                                 /*is_declaration=*/false,
                                                 NULL,
                                                 &is_cv_qualifier);
      if (!type_specifier)
        {
          /* If the first type-specifier could not be found, this is
             not a type-specifier-seq at all.  */
          if (!seen_type_specifier)
            {
              cp_parser_error (parser, "expected type-specifier");
              type_specifier_seq->type = error_mark_node;
              return;
            }
          /* If subsequent type-specifiers could not be found, the
             type-specifier-seq is complete.  */
          break;
        }

      seen_type_specifier = true;
      /* The standard says that a condition can be:

            type-specifier-seq declarator = assignment-expression

         However, given:

           struct S {};
           if (int S = ...)

         we should treat the "S" as a declarator, not as a
         type-specifier.  The standard doesn't say that explicitly for
         type-specifier-seq, but it does say that for
         decl-specifier-seq in an ordinary declaration.  Perhaps it
         would be clearer just to allow a decl-specifier-seq here, and
         then add a semantic restriction that if any decl-specifiers
         that are not type-specifiers appear, the program is
         invalid.  */
      if (is_condition && !is_cv_qualifier)
        flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
    }

  cp_parser_check_decl_spec (type_specifier_seq);
}

/* Parse a parameter-declaration-clause.

   parameter-declaration-clause:
     parameter-declaration-list [opt] ... [opt]
     parameter-declaration-list , ...

   Returns a representation for the parameter declarations.  A return
   value of NULL indicates a parameter-declaration-clause consisting
   only of an ellipsis.
*/

static cp_parameter_declarator *
cp_parser_parameter_declaration_clause (cp_parser* parser)
{
  cp_parameter_declarator *parameters;
  cp_token *token;
  bool ellipsis_p;
  bool is_error;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check for trivial parameter-declaration-clauses.  */
  if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL;
    }
  else if (token->type == CPP_CLOSE_PAREN)
    /* There are no parameters.  */
    {
#ifndef NO_IMPLICIT_EXTERN_C
      /* In implicit extern "C" contexts (system headers), `()' means
         an unspecified parameter list, as in C.  */
      if (in_system_header && current_class_type == NULL
          && current_lang_name == lang_name_c)
        return NULL;
      else
#endif
        return no_parameters;
    }
  /* Check for `(void)', too, which is a special case.  */
  else if (token->keyword == RID_VOID
           && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
               == CPP_CLOSE_PAREN))
    {
      /* Consume the `void' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* There are no parameters.  */
      return no_parameters;
    }

  /* Parse the parameter-declaration-list.  */
  parameters = cp_parser_parameter_declaration_list (parser, &is_error);
  /* If a parse error occurred while parsing the
     parameter-declaration-list, then the entire
     parameter-declaration-clause is erroneous.  */
  if (is_error)
    return NULL;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `,', the clause should terminate with an ellipsis.  */
  if (token->type == CPP_COMMA)
    {
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Expect an ellipsis.  */
      ellipsis_p
        = (cp_parser_require (parser, CPP_ELLIPSIS, "`...'") != NULL);
    }
  /* It might also be `...' if the optional trailing `,' was
     omitted.  */
  else if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* And remember that we saw it.  */
      ellipsis_p = true;
    }
  else
    ellipsis_p = false;

  /* Finish the parameter list.  */
  if (parameters && ellipsis_p)
    parameters->ellipsis_p = true;

  return parameters;
}

/* Parse a parameter-declaration-list.

   parameter-declaration-list:
     parameter-declaration
     parameter-declaration-list , parameter-declaration

   Returns a representation of the parameter-declaration-list, as for
   cp_parser_parameter_declaration_clause.  However, the
   `void_list_node' is never appended to the list.  Upon return,
   *IS_ERROR will be true iff an error occurred.  */

static cp_parameter_declarator *
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  cp_parameter_declarator *parameters = NULL;
  /* TAIL always points at the `next' slot where the following
     parameter should be appended.  */
  cp_parameter_declarator **tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;

  /* Assume all will go well.  */
  *is_error = false;
  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters to
     the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      bool parenthesized_p;
      /* Parse the parameter.  */
      parameter
        = cp_parser_parameter_declaration (parser,
                                           /*template_parm_p=*/false,
                                           &parenthesized_p);

      /* If a parse error occurred parsing the parameter declaration,
         then the entire parameter-declaration-list is erroneous.  */
      if (!parameter)
        {
          *is_error = true;
          parameters = NULL;
          break;
        }
      /* Add the new parameter to the list.  */
      *tail = parameter;
      tail = &parameter->next;

      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
          || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
          /* These are for Objective-C++ */
          || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
          || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
        /* The parameter-declaration-list is complete.  */
        break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        {
          cp_token *token;

          /* Peek at the next token.  */
          token = cp_lexer_peek_nth_token (parser->lexer, 2);
          /* If it's an ellipsis, then the list is complete.  */
          if (token->type == CPP_ELLIPSIS)
            break;
          /* Otherwise, there must be more parameters.  Consume the
             `,'.  */
          cp_lexer_consume_token (parser->lexer);
          /* When parsing something like:

                int i(float f, double d)

             we can tell after seeing the declaration for "f" that we
             are not looking at an initialization of a variable "i",
             but rather at the declaration of a function "i".

             Due to the fact that the parsing of template arguments
             (as specified to a template-id) requires backtracking we
             cannot use this technique when inside a template argument
             list.  */
          if (!parser->in_template_argument_list_p
              && !parser->in_type_id_in_expr_p
              && cp_parser_uncommitted_to_tentative_parse_p (parser)
              /* However, a parameter-declaration of the form
                 "float(f)" (which is a valid declaration of a
                 parameter "f") can also be interpreted as an
                 expression (the conversion of "f" to "float").  */
              && !parenthesized_p)
            cp_parser_commit_to_tentative_parse (parser);
        }
      else
        {
          cp_parser_error (parser, "expected %<,%> or %<...%>");
          if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
            cp_parser_skip_to_closing_parenthesis (parser,
                                                   /*recovering=*/true,
                                                   /*or_comma=*/false,
                                                   /*consume_paren=*/false);
          break;
        }
    }

  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  return parameters;
}

/* Parse a parameter declaration.

   parameter-declaration:
     decl-specifier-seq declarator
     decl-specifier-seq declarator = assignment-expression
     decl-specifier-seq abstract-declarator [opt]
     decl-specifier-seq abstract-declarator [opt] = assignment-expression

   If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
   declares a template parameter.
   (In that case, a non-nested `>' token
   encountered during the parsing of the assignment-expression is not
   interpreted as a greater-than operator.)

   Returns a representation of the parameter, or NULL if an error
   occurs.  If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
   true iff the declarator is of the form "(p)".  */

static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
                                 bool template_parm_p,
                                 bool *parenthesized_p)
{
  int declares_class_or_enum;
  bool greater_than_is_operator_p;
  cp_decl_specifier_seq decl_specifiers;
  cp_declarator *declarator;
  tree default_argument;
  cp_token *token;
  const char *saved_message;

  /* In a template parameter, `>' is not an operator.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  greater_than_is_operator_p = !template_parm_p;

  /* Type definitions may not appear in parameter types.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = "types may not be defined in parameter types";

  /* Parse the declaration-specifiers.  */
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_NONE,
                                &decl_specifiers,
                                &declares_class_or_enum);
  /* If an error occurred, there's no reason to attempt to parse the
     rest of the declaration.  */
  if (cp_parser_error_occurred (parser))
    {
      parser->type_definition_forbidden_message = saved_message;
      return NULL;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a `)', `,', `=', `>', or `...', then there
     is no declarator.  */
  if (token->type == CPP_CLOSE_PAREN
      || token->type == CPP_COMMA
      || token->type == CPP_EQ
      || token->type == CPP_ELLIPSIS
      || token->type == CPP_GREATER)
    {
      declarator = NULL;
      if (parenthesized_p)
        *parenthesized_p = false;
    }
  /* Otherwise, there should be a declarator.  */
  else
    {
      bool saved_default_arg_ok_p = parser->default_arg_ok_p;
      parser->default_arg_ok_p = false;

      /* After seeing a decl-specifier-seq, if the next token is not a
         "(", there is no possibility that the code is a valid
         expression.  Therefore, if parsing tentatively, we commit at
         this point.  */
      if (!parser->in_template_argument_list_p
          /* In an expression context, having seen:

               (int((char ...

             we cannot be sure whether we are looking at a
             function-type (taking a "char" as a parameter) or a cast
             of some object of type "char" to "int".  */
          && !parser->in_type_id_in_expr_p
          && cp_parser_uncommitted_to_tentative_parse_p (parser)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
        cp_parser_commit_to_tentative_parse (parser);
      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser,
                                         CP_PARSER_DECLARATOR_EITHER,
                                         /*ctor_dtor_or_conv_p=*/NULL,
                                         parenthesized_p,
                                         /*member_p=*/false);
      parser->default_arg_ok_p = saved_default_arg_ok_p;
      /* After the declarator, allow more attributes.  */
      decl_specifiers.attributes
        = chainon (decl_specifiers.attributes,
                   cp_parser_attributes_opt (parser));
    }

  /* The restriction on defining new types applies only to the type
     of the parameter, not to the default argument.  */
  parser->type_definition_forbidden_message = saved_message;

  /* If the next token is `=', then process a default argument.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      bool saved_greater_than_is_operator_p;
      /* Consume the `='.  */
      cp_lexer_consume_token (parser->lexer);

      /* If we are defining a class, then the tokens that make up the
         default argument must be saved and processed later.  */
      if (!template_parm_p && at_class_scope_p ()
          && TYPE_BEING_DEFINED (current_class_type))
        {
          unsigned depth = 0;
          cp_token *first_token;
          cp_token *token;

          /* Add tokens until we have processed the entire default
             argument.  We add the range [first_token, token).
             DEPTH tracks nesting of ()/[]/{} so that only a
             *non-nested* terminator ends the argument.  */
          first_token = cp_lexer_peek_token (parser->lexer);
          while (true)
            {
              bool done = false;

              /* Peek at the next token.  */
              token = cp_lexer_peek_token (parser->lexer);
              /* What we do depends on what token we have.  */
              switch (token->type)
                {
                  /* In valid code, a default argument must be
                     immediately followed by a `,' `)', or `...'.  */
                case CPP_COMMA:
                case CPP_CLOSE_PAREN:
                case CPP_ELLIPSIS:
                  /* If we run into a non-nested `;', `}', or `]',
                     then the code is invalid -- but the default
                     argument is certainly over.  */
                case CPP_SEMICOLON:
                case CPP_CLOSE_BRACE:
                case CPP_CLOSE_SQUARE:
                  if (depth == 0)
                    done = true;
                  /* Update DEPTH, if necessary.  */
                  else if (token->type == CPP_CLOSE_PAREN
                           || token->type == CPP_CLOSE_BRACE
                           || token->type == CPP_CLOSE_SQUARE)
                    --depth;
                  break;

                case CPP_OPEN_PAREN:
                case CPP_OPEN_SQUARE:
                case CPP_OPEN_BRACE:
                  ++depth;
                  break;

                case CPP_GREATER:
                  /* If we see a non-nested `>', and `>' is not an
                     operator, then it marks the end of the default
                     argument.  */
                  if (!depth && !greater_than_is_operator_p)
                    done = true;
                  break;

                  /* If we run out of tokens, issue an error message.  */
                case CPP_EOF:
                case CPP_PRAGMA_EOL:
                  error ("file ends in default argument");
                  done = true;
                  break;

                case CPP_NAME:
                case CPP_SCOPE:
                  /* In these cases, we should look for template-ids.
                     For example, if the default argument is
                     `X<int, double>()', we need to do name lookup to
                     figure out whether or not `X' is a template; if
                     so, the `,' does not end the default argument.

                     That is not yet done.  */
                  break;

                default:
                  break;
                }

              /* If we've reached the end, stop.  */
              if (done)
                break;

              /* Add the token to the token block.  */
              token = cp_lexer_consume_token (parser->lexer);
            }

          /* Create a DEFAULT_ARG to represent the unparsed default
             argument.  */
          default_argument = make_node (DEFAULT_ARG);
          DEFARG_TOKENS (default_argument)
            = cp_token_cache_new (first_token, token);
          DEFARG_INSTANTIATIONS (default_argument) = NULL;
        }
      /* Outside of a class definition, we can just parse the
         assignment-expression.  */
      else
        {
          bool saved_local_variables_forbidden_p;

          /* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is
             set correctly.  */
          saved_greater_than_is_operator_p
            = parser->greater_than_is_operator_p;
          parser->greater_than_is_operator_p = greater_than_is_operator_p;
          /* Local variable names (and the `this' keyword) may not
             appear in a default argument.  */
          saved_local_variables_forbidden_p
            = parser->local_variables_forbidden_p;
          parser->local_variables_forbidden_p = true;
          /* The default argument expression may cause implicitly
             defined member functions to be synthesized, which will
             result in garbage collection.  We must treat this
             situation as if we were within the body of function so as
             to avoid collecting live data on the stack.  */
          ++function_depth;
          /* Parse the assignment-expression.  */
          if (template_parm_p)
            push_deferring_access_checks (dk_no_deferred);
          default_argument
            = cp_parser_assignment_expression (parser, /*cast_p=*/false);
          if (template_parm_p)
            pop_deferring_access_checks ();
          /* Restore saved state.  */
          --function_depth;
          parser->greater_than_is_operator_p
            = saved_greater_than_is_operator_p;
          parser->local_variables_forbidden_p
            = saved_local_variables_forbidden_p;
        }
      if (!parser->default_arg_ok_p)
        {
          if (!flag_pedantic_errors)
            warning (0, "deprecated use of default argument for parameter of non-function");
          else
            {
              error ("default arguments are only permitted for function parameters");
              default_argument = NULL_TREE;
            }
        }
    }
  else
    default_argument = NULL_TREE;

  return make_parameter_declarator (&decl_specifiers,
                                    declarator,
                                    default_argument);
}

/* Parse a function-body.

   function-body:
     compound_statement  */

static void
cp_parser_function_body (cp_parser *parser)
{
  /* APPLE LOCAL radar 5982990 */
  cp_parser_compound_statement (parser, NULL, false, false);
}

/* Parse a ctor-initializer-opt followed by a function-body.  Return
   true if a ctor-initializer was present.  */

static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser)
{
  tree body;
  bool ctor_initializer_p;

  /* Begin the function body.  */
  body = begin_function_body ();
  /* Parse the optional ctor-initializer.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt (parser);
  /* Parse the function-body.  */
  cp_parser_function_body (parser);
  /* Finish the function body.  */
  finish_function_body (body);
  return ctor_initializer_p;
}

/* Parse an initializer.

   initializer:
     = initializer-clause
     ( expression-list )

   Returns an expression representing the initializer.  If no
   initializer is present, NULL_TREE is returned.

   *IS_PARENTHESIZED_INIT is set to TRUE if the `( expression-list )'
   production is used, and zero otherwise.  *IS_PARENTHESIZED_INIT is
   set to FALSE if there is no initializer present.  If there is an
   initializer, and it is not a constant-expression, *NON_CONSTANT_P
   is set to true; otherwise it is set to false.  */

static tree
cp_parser_initializer (cp_parser* parser, bool* is_parenthesized_init,
                       bool* non_constant_p)
{
  cp_token *token;
  tree init;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Let our caller know whether or not this initializer was
     parenthesized.  */
  *is_parenthesized_init = (token->type == CPP_OPEN_PAREN);
  /* Assume that the initializer is constant.  */
  *non_constant_p = false;

  if (token->type == CPP_EQ)
    {
      /* Consume the `='.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the initializer-clause.  */
      init = cp_parser_initializer_clause (parser, non_constant_p);
    }
  else if (token->type == CPP_OPEN_PAREN)
    init = cp_parser_parenthesized_expression_list (parser, false,
                                                    /*cast_p=*/false,
                                                    non_constant_p);
  else
    {
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      init = error_mark_node;
    }

  return init;
}

/* Parse an initializer-clause.

   initializer-clause:
     assignment-expression
     { initializer-list , [opt] }
     { }

   Returns an expression representing the initializer.

   If the `assignment-expression' production is used the value
   returned is simply a representation for the expression.

   Otherwise, a CONSTRUCTOR is returned.
   The CONSTRUCTOR_ELTS will be
   the elements of the initializer-list (or NULL, if the last
   production is used).  The TREE_TYPE for the CONSTRUCTOR will be
   NULL_TREE.  There is no way to detect whether or not the optional
   trailing `,' was provided.  NON_CONSTANT_P is as for
   cp_parser_initializer.  */

static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  tree initializer;

  /* Assume the expression is constant.  */
  *non_constant_p = false;

  /* If it is not a `{', then we are looking at an
     assignment-expression.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    {
      initializer
        = cp_parser_constant_expression (parser,
                                         /*allow_non_constant_p=*/true,
                                         non_constant_p);
      if (!*non_constant_p)
        initializer = fold_non_dependent_expr (initializer);
    }
  else
    {
      /* Consume the `{' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Create a CONSTRUCTOR to represent the braced-initializer.  */
      initializer = make_node (CONSTRUCTOR);
      /* If it's not a `}', then there is a non-trivial initializer.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE))
        {
          /* Parse the initializer list.  */
          CONSTRUCTOR_ELTS (initializer)
            = cp_parser_initializer_list (parser, non_constant_p);
          /* A trailing `,' token is allowed.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
            cp_lexer_consume_token (parser->lexer);
        }
      /* Now, there should be a trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
    }

  return initializer;
}

/* Parse an initializer-list.

   initializer-list:
     initializer-clause
     initializer-list , initializer-clause

   GNU Extension:

   initializer-list:
     identifier : initializer-clause
     initializer-list, identifier : initializer-clause

   Returns a VEC of constructor_elt.  The VALUE of each elt is an
   expression for the initializer.  If the INDEX of the elt is
   non-NULL, it is the IDENTIFIER_NODE naming the field to
   initialize.  NON_CONSTANT_P is as for cp_parser_initializer.  */

static VEC(constructor_elt,gc) *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  VEC(constructor_elt,gc) *v = NULL;

  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;

  /* Parse the rest of the list.  */
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree initializer;
      bool clause_non_constant_p;

      /* If the next token is an identifier and the following one is a
         colon, we are looking at the GNU designated-initializer
         syntax.  */
      if (cp_parser_allow_gnu_extensions_p (parser)
          && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
          && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
        {
          /* Warn the user that they are using an extension.  */
          if (pedantic)
            pedwarn ("ISO C++ does not allow designated initializers");
          /* Consume the identifier.  */
          identifier = cp_lexer_consume_token (parser->lexer)->u.value;
          /* Consume the `:'.  */
          cp_lexer_consume_token (parser->lexer);
        }
      else
        identifier = NULL_TREE;

      /* Parse the initializer.  */
      initializer = cp_parser_initializer_clause (parser,
                                                  &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire
         initializer.  */
      if (clause_non_constant_p)
        *non_constant_p = true;

      /* Add it to the vector.  */
      CONSTRUCTOR_APPEND_ELT(v, identifier, initializer);

      /* If the next token is not a comma, we have reached the end of
         the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        break;

      /* Peek at the next token.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If the next token is a `}', then we're still done.  An
         initializer-clause can have a trailing `,' after the
         initializer-list and before the closing `}'.  */
      if (token->type == CPP_CLOSE_BRACE)
        break;

      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return v;
}

/* Classes [gram.class] */

/* Parse a class-name.
   class-name:
     identifier
     template-id

   TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
   to indicate that names looked up in dependent types should be
   assumed to be types.  TEMPLATE_KEYWORD_P is true iff the `template'
   keyword has been used to indicate that the name that appears next
   is a template.  TAG_TYPE indicates the explicit tag given before
   the type name, if any.  If CHECK_DEPENDENCY_P is FALSE, names are
   looked up in dependent scopes.  If CLASS_HEAD_P is TRUE, this class
   is the class being defined in a class-head.

   Returns the TYPE_DECL representing the class.  */

static tree
cp_parser_class_name (cp_parser *parser,
                      bool typename_keyword_p,
                      bool template_keyword_p,
                      enum tag_types tag_type,
                      bool check_dependency_p,
                      bool class_head_p,
                      bool is_declaration)
{
  tree decl;
  tree scope;
  bool typename_p;
  cp_token *token;

  /* All class-names start with an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID)
    {
      cp_parser_error (parser, "expected class-name");
      return error_mark_node;
    }

  /* PARSER->SCOPE can be cleared when parsing the
     template-arguments to a template-id, so we save it here.  */
  scope = parser->scope;
  if (scope == error_mark_node)
    return error_mark_node;

  /* Any name names a type if we're following the `typename' keyword
     in a qualified name where the enclosing scope is
     type-dependent.  */
  typename_p = (typename_keyword_p && scope && TYPE_P (scope)
                && dependent_type_p (scope));
  /* Handle the common case (an identifier, but not a template-id)
     efficiently.  */
  if (token->type == CPP_NAME
      && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
    {
      cp_token *identifier_token;
      tree identifier;
      bool ambiguous_p;

      /* Look for the identifier.  */
      identifier_token = cp_lexer_peek_token (parser->lexer);
      ambiguous_p = identifier_token->ambiguous_p;
      identifier = cp_parser_identifier (parser);
      /* If the next token isn't an identifier, we are certainly not
         looking at a class-name.  */
      if (identifier == error_mark_node)
        decl = error_mark_node;
      /* If we know this is a type-name, there's no need to look it
         up.  */
      else if (typename_p)
        decl = identifier;
      else
        {
          tree ambiguous_decls;
          /* If we already know that this lookup is ambiguous, then
             we've already issued an error message; there's no reason
             to check again.  */
          if (ambiguous_p)
            {
              cp_parser_simulate_error (parser);
              return error_mark_node;
            }
          /* If the next token is a `::', then the name must be a type
             name.

             [basic.lookup.qual]

             During the lookup for a name preceding the :: scope
             resolution operator, object, function, and enumerator
             names are ignored.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
            tag_type = typename_type;
          /* Look up the name.  */
          decl = cp_parser_lookup_name (parser, identifier,
                                        tag_type,
                                        /*is_template=*/false,
                                        /*is_namespace=*/false,
                                        check_dependency_p,
                                        &ambiguous_decls);
          if (ambiguous_decls)
            {
              error ("reference to %qD is ambiguous", identifier);
              print_candidates (ambiguous_decls);
              if (cp_parser_parsing_tentatively (parser))
                {
                  identifier_token->ambiguous_p = true;
                  cp_parser_simulate_error (parser);
                }
              return error_mark_node;
            }
        }
    }
  else
    {
      /* Try a template-id.  */
      decl = cp_parser_template_id (parser, template_keyword_p,
                                    check_dependency_p,
                                    is_declaration);
      if (decl == error_mark_node)
        return error_mark_node;
    }

  decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);

  /* If this is a typename, create a TYPENAME_TYPE.  */
  if (typename_p && decl != error_mark_node)
    {
      decl = make_typename_type (scope, decl, typename_type,
                                 /*complain=*/tf_error);
      if (decl != error_mark_node)
        decl = TYPE_NAME (decl);
    }

  /* Check to see that it is really the name of a class.  */
  if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
      && TREE_CODE (TREE_OPERAND (decl, 0)) == IDENTIFIER_NODE
      && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    /* Situations like this:

         template <typename T> struct A {
           typename T::template X<int>::I i;
         };

       are problematic.  Is `T::template X<int>' a class-name?  The
       standard does not seem to be definitive, but there is no other
       valid interpretation of the following `::'.  Therefore, those
       names are considered class-names.  */
    {
      decl = make_typename_type (scope, decl, tag_type, tf_error);
      if (decl != error_mark_node)
        decl = TYPE_NAME (decl);
    }
  else if (TREE_CODE (decl) != TYPE_DECL
           || TREE_TYPE (decl) == error_mark_node
           /* APPLE LOCAL begin radar 5277239 */
           || !IS_AGGR_TYPE (TREE_TYPE (decl))
           || cp_objc_property_reference_prefix (parser, TREE_TYPE (decl)))
           /* APPLE LOCAL end radar 5277239 */
    decl = error_mark_node;

  if (decl == error_mark_node)
    cp_parser_error (parser, "expected class-name");

  return decl;
}

/* Parse a class-specifier.

   class-specifier:
     class-head { member-specification [opt] }

   Returns the TREE_TYPE representing the class.  */

static tree
cp_parser_class_specifier (cp_parser* parser)
{
  cp_token *token;
  tree type;
  tree attributes = NULL_TREE;
  int has_trailing_semicolon;
  bool nested_name_specifier_p;
  unsigned saved_num_template_parameter_lists;
  bool saved_in_function_body;
  tree old_scope = NULL_TREE;
  tree scope = NULL_TREE;
  tree bases;

  push_deferring_access_checks (dk_no_deferred);

  /* Parse the class-head.  */
  type = cp_parser_class_head (parser,
                               &nested_name_specifier_p,
                               &attributes,
                               &bases);
  /* If the class-head was a semantic disaster, skip the entire body
     of the class.  */
  if (!type)
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Look for the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"))
    {
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Process the base classes.  If they're invalid, skip the
     entire class body.  */
  if (!xref_basetypes (type, bases))
    {
      cp_parser_skip_to_closing_brace (parser);
      /* Consuming the closing brace yields better error messages
         later on.
*/
      cp_lexer_consume_token (parser->lexer);
      pop_deferring_access_checks ();
      return error_mark_node;
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  cp_parser_check_type_definition (parser);
  /* Remember that we are defining one more class.  */
  ++parser->num_classes_being_defined;
  /* Inside the class, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* We are not in a function body.  */
  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = false;

  /* Start the class.  */
  if (nested_name_specifier_p)
    {
      scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type));
      old_scope = push_inner_scope (scope);
    }
  type = begin_class_definition (type, attributes);

  if (type == error_mark_node)
    /* If the type is erroneous, skip the entire body of the
       class.  */
    cp_parser_skip_to_closing_brace (parser);
  else
    /* Parse the member-specification.  */
    cp_parser_member_specification_opt (parser);

  /* Look for the trailing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");
  /* We get better error messages by noticing a common problem: a
     missing trailing `;'.  */
  token = cp_lexer_peek_token (parser->lexer);
  has_trailing_semicolon = (token->type == CPP_SEMICOLON);
  /* Look for trailing attributes to apply to this class.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    attributes = cp_parser_attributes_opt (parser);
  if (type != error_mark_node)
    type = finish_struct (type, attributes);
  if (nested_name_specifier_p)
    pop_inner_scope (old_scope, scope);
  /* If this class is not itself within the scope of another class,
     then we need to parse the bodies of all of the queued function
     definitions.  Note that the queued functions defined in a class
     are not always processed immediately following the
     class-specifier for that class.  Consider:

       struct A {
         struct B { void f() { sizeof (A); } };
       };

     If `f' were processed before the processing of `A' were
     completed, there would be no way to compute the size of `A'.

     Note that the nesting we are interested in here is lexical --
     not the semantic nesting given by TYPE_CONTEXT.  In particular,
     for:

       struct A { struct B; };
       struct A::B { void f() { } };

     there is no need to delay the parsing of `A::B::f'.  */
  if (--parser->num_classes_being_defined == 0)
    {
      tree queue_entry;
      tree fn;
      tree class_type = NULL_TREE;
      tree pushed_scope = NULL_TREE;

      /* In a first pass, parse default arguments to the functions.
         Then, in a second pass, parse the bodies of the functions.
         This two-phased approach handles cases like:

            struct S {
              void f() { g(); }
              void g(int i = 3);
            };

         */
      for (TREE_PURPOSE (parser->unparsed_functions_queues)
             = nreverse (TREE_PURPOSE (parser->unparsed_functions_queues));
           (queue_entry = TREE_PURPOSE (parser->unparsed_functions_queues));
           TREE_PURPOSE (parser->unparsed_functions_queues)
             = TREE_CHAIN (TREE_PURPOSE (parser->unparsed_functions_queues)))
        {
          fn = TREE_VALUE (queue_entry);
          /* If there are default arguments that have not yet been
             processed, take care of them now.  */
          if (class_type != TREE_PURPOSE (queue_entry))
            {
              if (pushed_scope)
                pop_scope (pushed_scope);
              class_type = TREE_PURPOSE (queue_entry);
              pushed_scope = push_scope (class_type);
            }
          /* Make sure that any template parameters are in scope.  */
          maybe_begin_member_template_processing (fn);
          /* Parse the default argument expressions.  */
          cp_parser_late_parsing_default_args (parser, fn);
          /* Remove any template parameters from the symbol table.  */
          maybe_end_member_template_processing ();
        }
      if (pushed_scope)
        pop_scope (pushed_scope);
      /* Now parse the body of the functions.
*/
      for (TREE_VALUE (parser->unparsed_functions_queues)
             = nreverse (TREE_VALUE (parser->unparsed_functions_queues));
           (queue_entry = TREE_VALUE (parser->unparsed_functions_queues));
           TREE_VALUE (parser->unparsed_functions_queues)
             = TREE_CHAIN (TREE_VALUE (parser->unparsed_functions_queues)))
        {
          /* Figure out which function we need to process.  */
          fn = TREE_VALUE (queue_entry);
          /* Parse the function.  */
          cp_parser_late_parsing_for_member (parser, fn);
        }
    }

  /* Put back any saved access checks.  */
  pop_deferring_access_checks ();

  /* Restore saved state.  */
  parser->in_function_body = saved_in_function_body;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;

  return type;
}

/* Parse a class-head.

   class-head:
     class-key identifier [opt] base-clause [opt]
     class-key nested-name-specifier identifier base-clause [opt]
     class-key nested-name-specifier [opt] template-id
       base-clause [opt]

   GNU Extensions:
     class-key attributes identifier [opt] base-clause [opt]
     class-key attributes nested-name-specifier identifier base-clause [opt]
     class-key attributes nested-name-specifier [opt] template-id
       base-clause [opt]

   Returns the TYPE of the indicated class.  Sets
   *NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
   involving a nested-name-specifier was used, and FALSE otherwise.

   Returns error_mark_node if this is not a class-head.

   Returns NULL_TREE if the class-head is syntactically valid, but
   semantically invalid in a way that means we should skip the entire
   body of the class.  */

static tree
cp_parser_class_head (cp_parser* parser,
                      bool* nested_name_specifier_p,
                      tree *attributes_p,
                      tree *bases)
{
  tree nested_name_specifier;
  enum tag_types class_key;
  tree id = NULL_TREE;
  tree type = NULL_TREE;
  tree attributes;
  bool template_id_p = false;
  bool qualified_p = false;
  bool invalid_nested_name_p = false;
  bool invalid_explicit_specialization_p = false;
  tree pushed_scope = NULL_TREE;
  unsigned num_templates;

  /* Assume no nested-name-specifier will be present.  */
  *nested_name_specifier_p = false;
  /* Assume no template parameter lists will be used in defining the
     type.  */
  num_templates = 0;

  /* Look for the class-key.  */
  class_key = cp_parser_class_key (parser);
  if (class_key == none_type)
    return error_mark_node;

  /* Parse the attributes.  */
  attributes = cp_parser_attributes_opt (parser);

  /* If the next token is `::', that is invalid -- but sometimes
     people do try to write:

       struct ::S {};

     Handle this gracefully by accepting the extra qualifier, and
     then issuing an error about it later if this really is a
     class-head.  If it turns out just to be an elaborated type
     specifier, remain silent.  */
  if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
    qualified_p = true;

  push_deferring_access_checks (dk_no_check);

  /* Determine the name of the class.  Begin by looking for an
     optional nested-name-specifier.  */
  nested_name_specifier
    = cp_parser_nested_name_specifier_opt (parser,
                                           /*typename_keyword_p=*/false,
                                           /*check_dependency_p=*/false,
                                           /*type_p=*/false,
                                           /*is_declaration=*/false);
  /* If there was a nested-name-specifier, then there *must* be an
     identifier.  */
  if (nested_name_specifier)
    {
      /* Although the grammar says `identifier', it really means
         `class-name' or `template-name'.  You are only allowed to
         define a class that has already been declared with this
         syntax.

         The proposed resolution for Core Issue 180 says that wherever
         you see `class T::X' you should treat `X' as a type-name.

         It is OK to define an inaccessible class; for example:

           class A { class B; };
           class A::B {};

         We do not know if we will see a class-name, or a
         template-name.  We look for a class-name first, in case the
         class-name is a template-id; if we looked for the
         template-name first we would stop after the
         template-name.  */
      cp_parser_parse_tentatively (parser);
      type = cp_parser_class_name (parser,
                                   /*typename_keyword_p=*/false,
                                   /*template_keyword_p=*/false,
                                   class_type,
                                   /*check_dependency_p=*/false,
                                   /*class_head_p=*/true,
                                   /*is_declaration=*/false);
      /* If that didn't work, ignore the nested-name-specifier.  */
      if (!cp_parser_parse_definitely (parser))
        {
          invalid_nested_name_p = true;
          id = cp_parser_identifier (parser);
          if (id == error_mark_node)
            id = NULL_TREE;
        }
      /* If we could not find a corresponding TYPE, treat this
         declaration like an unqualified declaration.  */
      if (type == error_mark_node)
        nested_name_specifier = NULL_TREE;
      /* Otherwise, count the number of templates used in TYPE and its
         containing scopes.  */
      else
        {
          tree scope;

          for (scope = TREE_TYPE (type);
               scope && TREE_CODE (scope) != NAMESPACE_DECL;
               scope = (TYPE_P (scope)
                        ? TYPE_CONTEXT (scope)
                        : DECL_CONTEXT (scope)))
            if (TYPE_P (scope)
                && CLASS_TYPE_P (scope)
                && CLASSTYPE_TEMPLATE_INFO (scope)
                && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
                && !CLASSTYPE_TEMPLATE_SPECIALIZATION (scope))
              ++num_templates;
        }
    }
  /* Otherwise, the identifier is optional.  */
  else
    {
      /* We don't know whether what comes next is a template-id,
         an identifier, or nothing at all.  */
      cp_parser_parse_tentatively (parser);
      /* Check for a template-id.  */
      id = cp_parser_template_id (parser,
                                  /*template_keyword_p=*/false,
                                  /*check_dependency_p=*/true,
                                  /*is_declaration=*/true);
      /* If that didn't work, it could still be an identifier.  */
      if (!cp_parser_parse_definitely (parser))
        {
          if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
            id = cp_parser_identifier (parser);
          else
            id = NULL_TREE;
        }
      else
        {
          template_id_p = true;
          ++num_templates;
        }
    }

  pop_deferring_access_checks ();

  if (id)
    cp_parser_check_for_invalid_template_id (parser, id);

  /* If it's not a `:' or a `{' then we can't really be looking at a
     class-head, since a class-head only appears as part of a
     class-specifier.  We have to detect this situation before
     calling xref_tag, since that has irreversible side-effects.  */
  if (!cp_parser_next_token_starts_class_definition_p (parser))
    {
      cp_parser_error (parser, "expected %<{%> or %<:%>");
      return error_mark_node;
    }

  /* At this point, we're going ahead with the class-specifier, even
     if some other problem occurs.  */
  cp_parser_commit_to_tentative_parse (parser);
  /* Issue the error about the overly-qualified name now.  */
  if (qualified_p)
    cp_parser_error (parser,
                     "global qualification of class name is invalid");
  else if (invalid_nested_name_p)
    cp_parser_error (parser,
                     "qualified name does not name a class");
  else if (nested_name_specifier)
    {
      tree scope;

      /* Reject typedef-names in class heads.  */
      if (!DECL_IMPLICIT_TYPEDEF_P (type))
        {
          error ("invalid class name in declaration of %qD", type);
          type = NULL_TREE;
          goto done;
        }

      /* Figure out in what scope the declaration is being placed.  */
      scope = current_scope ();
      /* If that scope does not contain the scope in which the
         class was originally declared, the program is invalid.  */
      if (scope && !is_ancestor (scope, nested_name_specifier))
        {
          error ("declaration of %qD in %qD which does not enclose %qD",
                 type, scope, nested_name_specifier);
          type = NULL_TREE;
          goto done;
        }
      /* [dcl.meaning]

         A declarator-id shall not be qualified except for the
         definition of a ... nested class outside of its class
         ... [or] the definition or explicit instantiation of a
         class member of a namespace outside of its namespace.  */
      if (scope == nested_name_specifier)
        {
          pedwarn ("extra qualification ignored");
          nested_name_specifier = NULL_TREE;
          num_templates = 0;
        }
    }
  /* An explicit-specialization must be preceded by "template <>".
     If it is not, try to recover gracefully.  */
  if (at_namespace_scope_p ()
      && parser->num_template_parameter_lists == 0
      && template_id_p)
    {
      error ("an explicit specialization must be preceded by %<template <>%>");
      invalid_explicit_specialization_p = true;
      /* Take the same action that would have been taken by
         cp_parser_explicit_specialization.  */
      ++parser->num_template_parameter_lists;
      begin_specialization ();
    }
  /* There must be no "return" statements between this point and the
     end of this function; set "type" to the correct return value and
     use "goto done;" to return.  */
  /* Make sure that the right number of template parameters were
     present.  */
  if (!cp_parser_check_template_parameters (parser, num_templates))
    {
      /* If something went wrong, there is no point in even trying to
         process the class-definition.  */
      type = NULL_TREE;
      goto done;
    }

  /* Look up the type.  */
  if (template_id_p)
    {
      type = TREE_TYPE (id);
      type = maybe_process_partial_specialization (type);
      if (nested_name_specifier)
        pushed_scope = push_scope (nested_name_specifier);
    }
  else if (nested_name_specifier)
    {
      tree class_type;

      /* Given:

            template <typename T> struct S { struct T };
            template <typename T> struct S<T>::T { };

         we will get a TYPENAME_TYPE when processing the definition of
         `S::T'.  We need to resolve it to the actual type before we
         try to define it.  */
      if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
        {
          class_type = resolve_typename_type (TREE_TYPE (type),
                                              /*only_current_p=*/false);
          if (class_type != error_mark_node)
            type = TYPE_NAME (class_type);
          else
            {
              cp_parser_error (parser, "could not resolve typename type");
              type = error_mark_node;
            }
        }

      maybe_process_partial_specialization (TREE_TYPE (type));
      class_type = current_class_type;
      /* Enter the scope indicated by the nested-name-specifier.  */
      pushed_scope = push_scope (nested_name_specifier);
      /* Get the canonical version of this type.  */
      type = TYPE_MAIN_DECL (TREE_TYPE (type));
      if (PROCESSING_REAL_TEMPLATE_DECL_P ()
          && !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
        {
          type = push_template_decl (type);
          if (type == error_mark_node)
            {
              type = NULL_TREE;
              goto done;
            }
        }

      type = TREE_TYPE (type);
      *nested_name_specifier_p = true;
    }
  else      /* The name is not a nested name.  */
    {
      /* If the class was unnamed, create a dummy name.  */
      if (!id)
        id = make_anon_name ();
      type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
                       parser->num_template_parameter_lists);
    }

  /* Indicate whether this class was declared as a `class' or as a
     `struct'.  */
  if (TREE_CODE (type) == RECORD_TYPE)
    CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
  cp_parser_check_class_key (class_key, type);

  /* If this type was already complete, and we see another definition,
     that's an error.  */
  if (type != error_mark_node && COMPLETE_TYPE_P (type))
    {
      error ("redefinition of %q#T", type);
      error ("previous definition of %q+#T", type);
      type = NULL_TREE;
      goto done;
    }
  else if (type == error_mark_node)
    type = NULL_TREE;

  /* We will have entered the scope containing the class; the names of
     base classes should be looked up in that context.  For example:

       struct A { struct B {}; struct C; };
       struct A::C : B {};

     is valid.  */
  *bases = NULL_TREE;

  /* Get the list of base-classes, if there is one.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    *bases = cp_parser_base_clause (parser);

 done:
  /* Leave the scope given by the nested-name-specifier.  We will
     enter the class scope itself while processing the members.  */
  if (pushed_scope)
    pop_scope (pushed_scope);

  if (invalid_explicit_specialization_p)
    {
      end_specialization ();
      --parser->num_template_parameter_lists;
    }
  *attributes_p = attributes;
  return type;
}

/* Parse a class-key.

   class-key:
     class
     struct
     union

   Returns the kind of class-key specified, or none_type to indicate
   error.
*/

/* Returns the tag kind (class/struct/union) named by the next token,
   or none_type if the next token is not a class-key.  Emits a
   diagnostic on failure.  */

static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  cp_token *token;
  enum tag_types tag_type;

  /* Look for the class-key.  */
  token = cp_parser_require (parser, CPP_KEYWORD, "class-key");
  if (!token)
    return none_type;

  /* Check to see if the TOKEN is a class-key.  */
  tag_type = cp_parser_token_is_class_key (token);
  if (!tag_type)
    cp_parser_error (parser, "expected class-key");
  return tag_type;
}

/* Parse an (optional) member-specification.

   member-specification:
     member-declaration member-specification [opt]
     access-specifier : member-specification [opt]  */

static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  /* Loop until the closing `}' (or EOF) of the class body.  */
  while (true)
    {
      cp_token *token;
      enum rid keyword;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's a `}', or EOF then we've seen all the members.  */
      if (token->type == CPP_CLOSE_BRACE
          || token->type == CPP_EOF
          || token->type == CPP_PRAGMA_EOL)
        break;

      /* See if this token is a keyword.  */
      keyword = token->keyword;
      switch (keyword)
        {
        case RID_PUBLIC:
        case RID_PROTECTED:
        case RID_PRIVATE:
          /* Consume the access-specifier.  */
          cp_lexer_consume_token (parser->lexer);
          /* Remember which access-specifier is active; subsequent
             member declarations pick it up from this global.  */
          current_access_specifier = token->u.value;
          /* Look for the `:'.  */
          cp_parser_require (parser, CPP_COLON, "`:'");
          break;

        default:
          /* Accept #pragmas at class scope.  */
          if (token->type == CPP_PRAGMA)
            {
              cp_parser_pragma (parser, pragma_external);
              break;
            }

          /* Otherwise, the next construction must be a
             member-declaration.  */
          cp_parser_member_declaration (parser);
        }
    }
}

/* Parse a member-declaration.
   member-declaration:
     decl-specifier-seq [opt] member-declarator-list [opt] ;
     function-definition ; [opt]
     :: [opt] nested-name-specifier template [opt] unqualified-id ;
     using-declaration
     template-declaration

   member-declarator-list:
     member-declarator
     member-declarator-list , member-declarator

   member-declarator:
     declarator pure-specifier [opt]
     declarator constant-initializer [opt]
     identifier [opt] : constant-expression

   GNU Extensions:

   member-declaration:
     __extension__ member-declaration

   member-declarator:
     declarator attributes [opt] pure-specifier [opt]
     declarator attributes [opt] constant-initializer [opt]
     identifier [opt] attributes [opt] : constant-expression  */

static void
cp_parser_member_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq decl_specifiers;
  tree prefix_attributes;
  tree decl;
  int declares_class_or_enum;
  bool friend_p;
  cp_token *token;
  int saved_pedantic;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Recurse.  */
      cp_parser_member_declaration (parser);
      /* Restore the old value of the PEDANTIC flag.  */
      pedantic = saved_pedantic;
      return;
    }

  /* Check for a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* An explicit specialization here is an error condition, and we
         expect the specialization handler to detect and report this.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
          && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
        cp_parser_explicit_specialization (parser);
      else
        cp_parser_template_declaration (parser, /*member_p=*/true);

      return;
    }

  /* Check for a using-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    {
      /* Parse the using-declaration.  */
      cp_parser_using_declaration (parser,
                                   /*access_declaration_p=*/false);
      return;
    }

  /* Check for @defs (Objective-C++).  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
    {
      tree ivar, member;
      tree ivar_chains = cp_parser_objc_defs_expression (parser);
      ivar = ivar_chains;
      /* Register each instance variable from the @defs expansion as a
         member of the current class.  */
      while (ivar)
        {
          member = ivar;
          ivar = TREE_CHAIN (member);
          TREE_CHAIN (member) = NULL_TREE;
          finish_member_declaration (member);
        }
      /* APPLE LOCAL begin C* warnings to easy porting to new abi */
      if (flag_objc_abi == 3
          || (flag_objc2_check && flag_objc_abi == 1))
        warning (0, "@defs will not be supported in future");
      /* APPLE LOCAL radar 4705250 */
      else if (flag_objc_abi == 2 && flag_objc_atdefs != 1)
        error ("@defs will not be supported in future");
      /* APPLE LOCAL end C* warnings to easy porting to new abi */
      return;
    }

  /* An access-declaration (e.g. `Base::member;') is handled here.  */
  if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
    return;

  /* Parse the decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_OPTIONAL,
                                &decl_specifiers,
                                &declares_class_or_enum);
  /* Set aside the attributes from the specifier sequence; they are
     re-combined with per-declarator attributes for each declarator
     below.  */
  prefix_attributes = decl_specifiers.attributes;
  decl_specifiers.attributes = NULL_TREE;
  /* Check for an invalid type-name.  */
  if (!decl_specifiers.type
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    return;
  /* If there is no declarator, then the decl-specifier-seq should
     specify a type.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* If there was no decl-specifier-seq, and the next token is a
         `;', then we have something like:

           struct S { ; };

         [class.mem]

         Each member-declaration shall declare at least one member
         name of the class.  */
      if (!decl_specifiers.any_specifiers_p)
        {
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          if (pedantic && !token->in_system_header)
            pedwarn ("%Hextra %<;%>", &token->location);
        }
      else
        {
          tree type;

          /* See if this declaration is a friend.  */
          friend_p = cp_parser_friend_p (&decl_specifiers);
          /* If there were decl-specifiers, check to see if there was
             a class-declaration.
*/
          if (token->type == CPP_COLON
              || (token->type == CPP_NAME
                  && cp_lexer_peek_nth_token (parser->lexer, 2)->type
                     == CPP_COLON))
            {
              tree identifier;
              tree width;

              /* Get the name of the bitfield.  Note that we cannot just
                 check TOKEN here because it may have been invalidated by
                 the call to cp_lexer_peek_nth_token above.  */
              if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
                identifier = cp_parser_identifier (parser);
              else
                identifier = NULL_TREE;
              /* Consume the `:' token.  */
              cp_lexer_consume_token (parser->lexer);
              /* Get the width of the bitfield.  */
              width
                = cp_parser_constant_expression (parser,
                                                 /*allow_non_constant=*/false,
                                                 NULL);

              /* Look for attributes that apply to the bitfield.  */
              attributes = cp_parser_attributes_opt (parser);
              /* Remember which attributes are prefix attributes and
                 which are not.  */
              first_attribute = attributes;
              /* Combine the attributes.  */
              attributes = chainon (prefix_attributes, attributes);

              /* Create the bitfield declaration.  */
              decl = grokbitfield (identifier
                                   ? make_id_declarator (NULL_TREE,
                                                         identifier,
                                                         sfk_none)
                                   : NULL,
                                   &decl_specifiers,
                                   width);
              /* Apply the attributes.  */
              cplus_decl_attributes (&decl, attributes, /*flags=*/0);
            }
          else
            {
              cp_declarator *declarator;
              tree initializer;
              tree asm_specification;
              int ctor_dtor_or_conv_p;

              /* Parse the declarator.  */
              declarator
                = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                        &ctor_dtor_or_conv_p,
                                        /*parenthesized_p=*/NULL,
                                        /*member_p=*/true);

              /* If something went wrong parsing the declarator, make sure
                 that we at least consume some tokens.  */
              if (declarator == cp_error_declarator)
                {
                  /* Skip to the end of the statement.  */
                  cp_parser_skip_to_end_of_statement (parser);
                  /* If the next token is not a semicolon, that is
                     probably because we just skipped over the body of
                     a function.  So, we consume a semicolon if
                     present, but do not issue an error message if it
                     is not present.  */
                  if (cp_lexer_next_token_is (parser->lexer,
                                              CPP_SEMICOLON))
                    cp_lexer_consume_token (parser->lexer);
                  return;
                }

              if (declares_class_or_enum & 2)
                cp_parser_check_for_definition_in_return_type
                  (declarator, decl_specifiers.type);

              /* Look for an asm-specification.  */
              asm_specification = cp_parser_asm_specification_opt (parser);
              /* Look for attributes that apply to the declaration.  */
              attributes = cp_parser_attributes_opt (parser);
              /* Remember which attributes are prefix attributes and
                 which are not.  */
              first_attribute = attributes;
              /* Combine the attributes.  */
              attributes = chainon (prefix_attributes, attributes);

              /* If it's an `=', then we have a constant-initializer or a
                 pure-specifier.  It is not correct to parse the
                 initializer before registering the member declaration
                 since the member declaration should be in scope while
                 its initializer is processed.  However, the rest of the
                 front end does not yet provide an interface that allows
                 us to handle this correctly.  */
              if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
                {
                  /* In [class.mem]:

                     A pure-specifier shall be used only in the declaration of
                     a virtual function.

                     A member-declarator can contain a constant-initializer
                     only if it declares a static member of integral or
                     enumeration type.

                     Therefore, if the DECLARATOR is for a function, we look
                     for a pure-specifier; otherwise, we look for a
                     constant-initializer.  When we call `grokfield', it will
                     perform more stringent semantics checks.  */
                  if (function_declarator_p (declarator))
                    initializer = cp_parser_pure_specifier (parser);
                  else
                    /* Parse the initializer.  */
                    initializer = cp_parser_constant_initializer (parser);
                }
              /* Otherwise, there is no initializer.  */
              else
                initializer = NULL_TREE;

              /* See if we are probably looking at a function
                 definition.  We are certainly not looking at a
                 member-declarator.  Calling `grokfield' has
                 side-effects, so we must not do it unless we are sure
                 that we are looking at a member-declarator.  */
              if (cp_parser_token_starts_function_definition_p
                  (cp_lexer_peek_token (parser->lexer)))
                {
                  /* The grammar does not allow a pure-specifier to be
                     used when a member function is defined.  (It is
                     possible that this fact is an oversight in the
                     standard, since a pure function may be defined
                     outside of the class-specifier.)  */
                  if (initializer)
                    error ("pure-specifier on function-definition");
                  decl = cp_parser_save_member_function_body (parser,
                                                              &decl_specifiers,
                                                              declarator,
                                                              attributes);
                  /* If the member was not a friend, declare it here.  */
                  if (!friend_p)
                    finish_member_declaration (decl);
                  /* Peek at the next token.  */
                  token = cp_lexer_peek_token (parser->lexer);
                  /* If the next token is a semicolon, consume it.  */
                  if (token->type == CPP_SEMICOLON)
                    cp_lexer_consume_token (parser->lexer);
                  return;
                }
              else
                /* Create the declaration.  */
                decl = grokfield (declarator, &decl_specifiers,
                                  initializer, /*init_const_expr_p=*/true,
                                  asm_specification,
                                  attributes);
            }

          /* Reset PREFIX_ATTRIBUTES.  */
          while (attributes && TREE_CHAIN (attributes) != first_attribute)
            attributes = TREE_CHAIN (attributes);
          if (attributes)
            TREE_CHAIN (attributes) = NULL_TREE;

          /* If there is any qualification still in effect, clear it
             now; we will be starting fresh with the next declarator.  */
          parser->scope = NULL_TREE;
          parser->qualifying_scope = NULL_TREE;
          parser->object_scope = NULL_TREE;
          /* If it's a `,', then there are more declarators.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
            cp_lexer_consume_token (parser->lexer);
          /* If the next token isn't a `;', then we have a parse error.  */
          else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
            {
              cp_parser_error (parser, "expected %<;%>");
              /* Skip tokens until we find a `;'.  */
              cp_parser_skip_to_end_of_statement (parser);

              break;
            }

          if (decl)
            {
              /* Add DECL to the list of members.  */
              if (!friend_p)
                finish_member_declaration (decl);

              if (TREE_CODE (decl) == FUNCTION_DECL)
                cp_parser_save_default_args (parser, decl);
            }
        }
    }

  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
}

/* Parse a pure-specifier.

   pure-specifier:
     = 0

   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.  */

static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, "`='"))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_consume_token (parser->lexer);
  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
                       "invalid pure specifier (only `= 0' is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error ("templates may not be %<virtual%>");
      return error_mark_node;
    }

  return integer_zero_node;
}

/* Parse a constant-initializer.

   constant-initializer:
     = constant-expression

   Returns a representation of the constant-expression.  */

static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, "`='"))
    return error_mark_node;

  /* It is invalid to write:

       struct S { static const int i = { 7 }; };

     */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
                       "a brace-enclosed initializer is not allowed here");
      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);
      /* Skip the initializer.  */
      cp_parser_skip_to_closing_brace (parser);
      /* Look for the trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");

      return error_mark_node;
    }

  return cp_parser_constant_expression (parser,
                                        /*allow_non_constant=*/false,
                                        NULL);
}

/* Derived classes [gram.class.derived] */

/* Parse a base-clause.
   base-clause:
     : base-specifier-list

   base-specifier-list:
     base-specifier
     base-specifier-list , base-specifier

   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.

   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */

static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;

  /* Look for the `:' that begins the list.  */
  cp_parser_require (parser, CPP_COLON, "`:'");

  /* Scan the base-specifier-list.  */
  while (true)
    {
      cp_token *token;
      tree base;

      /* Look for the base-specifier.  */
      base = cp_parser_base_specifier (parser);
      /* Add BASE to the front of the list; the list is reversed
         before being returned so declaration order is preserved.  */
      if (base != error_mark_node)
        {
          TREE_CHAIN (base) = bases;
          bases = base;
        }
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not a comma, then the list is complete.  */
      if (token->type != CPP_COMMA)
        break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return nreverse (bases);
}

/* Parse a base-specifier.

   base-specifier:
     :: [opt] nested-name-specifier [opt] class-name
     virtual access-specifier [opt] :: [opt] nested-name-specifier
       [opt] class-name
     access-specifier virtual [opt] :: [opt] nested-name-specifier
       [opt] class-name

   Returns a TREE_LIST.  The TREE_PURPOSE will be one of
   ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
   indicate the specifiers provided.  The TREE_VALUE will be a TYPE
   (or the ERROR_MARK_NODE) indicating the type that was specified.
*/

static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;

  /* Process the optional `virtual' and `access-specifier'.  These may
     appear in either order, but each diagnostic is issued at most
     once per base-specifier.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
        {
        case RID_VIRTUAL:
          /* If `virtual' appears more than once, issue an error.  */
          if (virtual_p && !duplicate_virtual_error_issued_p)
            {
              /* The grammar term is "base-specifier"; the old message
                 said "base-specified", which names no construct.  */
              cp_parser_error (parser,
                               "%<virtual%> specified more than once in base-specifier");
              duplicate_virtual_error_issued_p = true;
            }

          virtual_p = true;

          /* Consume the `virtual' token.  */
          cp_lexer_consume_token (parser->lexer);

          break;

        case RID_PUBLIC:
        case RID_PROTECTED:
        case RID_PRIVATE:
          /* If more than one access specifier appears, issue an
             error.  */
          if (access != access_default_node
              && !duplicate_access_error_issued_p)
            {
              cp_parser_error (parser,
                               "more than one access specifier in base-specifier");
              duplicate_access_error_issued_p = true;
            }

          access = ridpointers[(int) token->keyword];

          /* Consume the access-specifier.  */
          cp_lexer_consume_token (parser->lexer);

          break;

        default:
          done = true;
          break;
        }
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types as
     base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      if (!processing_template_decl)
        error ("keyword %<typename%> not allowed outside of templates");
      else
        error ("keyword %<typename%> not allowed in this context "
               "(the base class is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.
   The simplest way to implement:

     [temp.res]

     The keyword `typename' is not permitted in a base-specifier or
     mem-initializer; in these contexts a qualified name that
     depends on a template-parameter is implicitly assumed to be a
     type name.

   is to pretend that we have seen the `typename' keyword at this
   point.  */
  cp_parser_nested_name_specifier_opt (parser,
                                       /*typename_keyword_p=*/true,
                                       /*check_dependency_p=*/true,
                                       typename_type,
                                       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);

  /* Finally, look for the class-name.  */
  type = cp_parser_class_name (parser,
                               class_scope_p,
                               template_p,
                               typename_type,
                               /*check_dependency_p=*/true,
                               /*class_head_p=*/false,
                               /*is_declaration=*/true);

  if (type == error_mark_node)
    return error_mark_node;

  return finish_base_specifier (TREE_TYPE (type), access, virtual_p);
}

/* Exception handling [gram.exception] */

/* Parse an (optional) exception-specification.

   exception-specification:
     throw ( type-id-list [opt] )

   Returns a TREE_LIST representing the exception-specification.  The
   TREE_VALUE of each node is a type.  */

static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree type_id_list;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not `throw', then there's no exception-specification.  */
  if (!cp_parser_is_keyword (token, RID_THROW))
    return NULL_TREE;

  /* Consume the `throw'.  */
  cp_lexer_consume_token (parser->lexer);

  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not a `)', then there is a type-id-list.  */
  if (token->type != CPP_CLOSE_PAREN)
    {
      const char *saved_message;

      /* Types may not be defined in an exception-specification.
*/
      /* Forbid type definitions for the duration of the type-id-list;
         the previous message is restored afterwards.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
        = "types may not be defined in an exception-specification";
      /* Parse the type-id-list.  */
      type_id_list = cp_parser_type_id_list (parser);
      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;
    }
  else
    type_id_list = empty_except_spec;

  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return type_id_list;
}

/* Parse an (optional) type-id-list.

   type-id-list:
     type-id
     type-id-list , type-id

   Returns a TREE_LIST.  The TREE_VALUE of each node is a TYPE, in the
   order that the types were presented.  */

static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree types = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree type;

      /* Get the next type-id.  */
      type = cp_parser_type_id (parser);
      /* Add it to the list.  */
      types = add_exception_specifier (types, type, /*complain=*/1);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it is not a `,', we are done.  */
      if (token->type != CPP_COMMA)
        break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (types);
}

/* Parse a try-block.

   try-block:
     try compound-statement handler-seq  */

static tree
cp_parser_try_block (cp_parser* parser)
{
  tree try_block;

  cp_parser_require_keyword (parser, RID_TRY, "`try'");
  try_block = begin_try_block ();
  /* APPLE LOCAL radar 5982990 */
  cp_parser_compound_statement (parser, NULL, true, false);
  finish_try_block (try_block);
  cp_parser_handler_seq (parser);
  finish_handler_sequence (try_block);

  return try_block;
}

/* Parse a function-try-block.

   function-try-block:
     try ctor-initializer [opt] function-body handler-seq

   Returns true if a ctor-initializer was present.  */

static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree compound_stmt;
  tree try_block;
  bool ctor_initializer_p;

  /* Look for the `try' keyword.
*/
  if (!cp_parser_require_keyword (parser, RID_TRY, "`try'"))
    return false;
  /* Let the rest of the front-end know where we are.  */
  try_block = begin_function_try_block (&compound_stmt);
  /* Parse the function-body.  */
  ctor_initializer_p
    = cp_parser_ctor_initializer_opt_and_function_body (parser);
  /* We're done with the `try' part.  */
  finish_function_try_block (try_block);
  /* Parse the handlers.  */
  cp_parser_handler_seq (parser);
  /* We're done with the handlers.  */
  finish_function_handler_sequence (try_block, compound_stmt);

  return ctor_initializer_p;
}

/* Parse a handler-seq.

   handler-seq:
     handler handler-seq [opt]  */

static void
cp_parser_handler_seq (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      /* Parse the handler.  */
      cp_parser_handler (parser);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `catch' then there are no more handlers.  */
      if (!cp_parser_is_keyword (token, RID_CATCH))
        break;
    }
}

/* Parse a handler.

   handler:
     catch ( exception-declaration ) compound-statement  */

static void
cp_parser_handler (cp_parser* parser)
{
  tree handler;
  tree declaration;

  cp_parser_require_keyword (parser, RID_CATCH, "`catch'");
  handler = begin_handler ();
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  declaration = cp_parser_exception_declaration (parser);
  finish_handler_parms (declaration, handler);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
  /* APPLE LOCAL radar 5982990 */
  cp_parser_compound_statement (parser, NULL, false, false);
  finish_handler (handler);
}

/* Parse an exception-declaration.

   exception-declaration:
     type-specifier-seq declarator
     type-specifier-seq abstract-declarator
     type-specifier-seq
     ...

   Returns a VAR_DECL for the declaration, or NULL_TREE if the
   ellipsis variant is used.  */

static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;

  /* If it's an ellipsis, it's easy to handle.
*/
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }

  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = "types may not be defined in exception-declarations";

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
                                &type_specifiers);
  /* If it's a `)', then there is no declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    declarator = NULL;
  else
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
                                       /*ctor_dtor_or_conv_p=*/NULL,
                                       /*parenthesized_p=*/NULL,
                                       /*member_p=*/false);

  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;

  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;

  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}

/* Parse a throw-expression.

   throw-expression:
     throw assignment-expression [opt]

   Returns a THROW_EXPR representing the throw-expression.  */

static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree expression;
  cp_token* token;

  cp_parser_require_keyword (parser, RID_THROW, "`throw'");
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out whether or not there is an assignment-expression
     following the "throw" keyword.  A token that can only follow a
     complete expression (`,', `;', `)', `]', `}', `:') means the
     operand was omitted (a rethrow).  */
  if (token->type == CPP_COMMA
      || token->type == CPP_SEMICOLON
      || token->type == CPP_CLOSE_PAREN
      || token->type == CPP_CLOSE_SQUARE
      || token->type == CPP_CLOSE_BRACE
      || token->type == CPP_COLON)
    expression = NULL_TREE;
  else
    expression = cp_parser_assignment_expression (parser,
                                                  /*cast_p=*/false);

  return build_throw (expression);
}

/* GNU Extensions */

/* Parse an (optional) asm-specification.

   asm-specification:
     asm ( string-literal )

   If the asm-specification is present, returns a STRING_CST
   corresponding to the string-literal.  Otherwise, returns NULL_TREE.
*/ static tree cp_parser_asm_specification_opt (cp_parser* parser) { cp_token *token; tree asm_specification; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token isn't the `asm' keyword, then there's no asm-specification. */ if (!cp_parser_is_keyword (token, RID_ASM)) return NULL_TREE; /* Consume the `asm' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); /* Look for the string-literal. */ asm_specification = cp_parser_string_literal (parser, false, false); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "`('"); return asm_specification; } /* Parse an asm-operand-list. asm-operand-list: asm-operand asm-operand-list , asm-operand asm-operand: string-literal ( expression ) [ string-literal ] string-literal ( expression ) Returns a TREE_LIST representing the operands. The TREE_VALUE of each node is the expression. The TREE_PURPOSE is itself a TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed string-literal (or NULL_TREE if not present) and whose TREE_VALUE is a STRING_CST for the string literal before the parenthesis. */ static tree cp_parser_asm_operand_list (cp_parser* parser) { tree asm_operands = NULL_TREE; while (true) { tree string_literal; tree expression; tree name; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); /* Read the operand name. */ name = cp_parser_identifier (parser); if (name != error_mark_node) name = build_string (IDENTIFIER_LENGTH (name), IDENTIFIER_POINTER (name)); /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'"); } else name = NULL_TREE; /* Look for the string-literal. */ string_literal = cp_parser_string_literal (parser, false, false); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); /* Parse the expression. 
*/
      expression = cp_parser_expression (parser, /*cast_p=*/false);
      /* Look for the `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

      /* Add this operand to the list.  */
      asm_operands = tree_cons (build_tree_list (name, string_literal),
                                expression,
                                asm_operands);
      /* If the next token is not a `,', there are no more
         operands.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (asm_operands);
}

/* Parse an asm-clobber-list.

   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal

   Returns a TREE_LIST, indicating the clobbers in the order that they
   appeared.  The TREE_VALUE of each node is a STRING_CST.  */

static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobbers = NULL_TREE;

  while (true)
    {
      tree string_literal;

      /* Look for the string literal.  */
      string_literal = cp_parser_string_literal (parser, false, false);
      /* Add it to the list.  */
      clobbers = tree_cons (NULL_TREE, string_literal, clobbers);
      /* If the next token is not a `,', then the list is
         complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
        break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return clobbers;
}

/* Parse an (optional) series of attributes.

   attributes:
     attributes attribute

   attribute:
     __attribute__ (( attribute-list [opt] ))

   The return value is as for cp_parser_attribute_list.  */

static tree
cp_parser_attributes_opt (cp_parser* parser)
{
  tree attributes = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree attribute_list;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `__attribute__', then we're done.  */
      if (token->keyword != RID_ATTRIBUTE)
        break;

      /* Consume the `__attribute__' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the two `(' tokens.
*/ cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); cp_parser_require (parser, CPP_OPEN_PAREN, "`('"); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); if (token->type != CPP_CLOSE_PAREN) /* Parse the attribute-list. */ attribute_list = cp_parser_attribute_list (parser); else /* If the next token is a `)', then there is no attribute list. */ attribute_list = NULL; /* Look for the two `)' tokens. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); /* Add these new attributes to the list. */ attributes = chainon (attributes, attribute_list); } return attributes; } /* Parse an attribute-list. attribute-list: attribute attribute-list , attribute attribute: identifier identifier ( identifier ) identifier ( identifier , expression-list ) identifier ( expression-list ) Returns a TREE_LIST, or NULL_TREE on error. Each node corresponds to an attribute. The TREE_PURPOSE of each node is the identifier indicating which attribute is in use. The TREE_VALUE represents the arguments, if any. */ static tree cp_parser_attribute_list (cp_parser* parser) { tree attribute_list = NULL_TREE; bool save_translate_strings_p = parser->translate_strings_p; parser->translate_strings_p = false; while (true) { cp_token *token; tree identifier; tree attribute; /* Look for the identifier. We also allow keywords here; for example `__attribute__ ((const))' is legal. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME || token->type == CPP_KEYWORD) { tree arguments = NULL_TREE; /* Consume the token. */ token = cp_lexer_consume_token (parser->lexer); /* Save away the identifier that indicates which attribute this is. */ identifier = token->u.value; attribute = build_tree_list (identifier, NULL_TREE); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's an `(', then parse the attribute arguments. 
*/ if (token->type == CPP_OPEN_PAREN) { arguments = cp_parser_parenthesized_expression_list (parser, true, /*cast_p=*/false, /*non_constant_p=*/NULL); /* Save the arguments away. */ TREE_VALUE (attribute) = arguments; } if (arguments != error_mark_node) { /* Add this attribute to the list. */ TREE_CHAIN (attribute) = attribute_list; attribute_list = attribute; } token = cp_lexer_peek_token (parser->lexer); } /* Now, look for more attributes. If the next token isn't a `,', we're done. */ if (token->type != CPP_COMMA) break; /* Consume the comma and keep going. */ cp_lexer_consume_token (parser->lexer); } parser->translate_strings_p = save_translate_strings_p; /* We built up the list in reverse order. */ return nreverse (attribute_list); } /* Parse an optional `__extension__' keyword. Returns TRUE if it is present, and FALSE otherwise. *SAVED_PEDANTIC is set to the current value of the PEDANTIC flag, regardless of whether or not the `__extension__' keyword is present. The caller is responsible for restoring the value of the PEDANTIC flag. */ static bool cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic) { /* Save the old value of the PEDANTIC flag. */ *saved_pedantic = pedantic; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION)) { /* Consume the `__extension__' token. */ cp_lexer_consume_token (parser->lexer); /* We're not being pedantic while the `__extension__' keyword is in effect. */ pedantic = 0; return true; } return false; } /* Parse a label declaration. label-declaration: __label__ label-declarator-seq ; label-declarator-seq: identifier , label-declarator-seq identifier */ static void cp_parser_label_declaration (cp_parser* parser) { /* Look for the `__label__' keyword. */ cp_parser_require_keyword (parser, RID_LABEL, "`__label__'"); while (true) { tree identifier; /* Look for an identifier. */ identifier = cp_parser_identifier (parser); /* If we failed, stop. */ if (identifier == error_mark_node) break; /* Declare it as a label. 
*/ finish_label_decl (identifier); /* If the next token is a `;', stop. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) break; /* Look for the `,' separating the label declarations. */ cp_parser_require (parser, CPP_COMMA, "`,'"); } /* Look for the final `;'. */ cp_parser_require (parser, CPP_SEMICOLON, "`;'"); } /* Support Functions */ /* Looks up NAME in the current scope, as given by PARSER->SCOPE. NAME should have one of the representations used for an id-expression. If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE is returned. If PARSER->SCOPE is a dependent type, then a SCOPE_REF is returned. If NAME is a TEMPLATE_ID_EXPR, then it will be immediately returned; the name was already resolved when the TEMPLATE_ID_EXPR was formed. Abstractly, such entities should not be passed to this function, because they do not need to be looked up, but it is simpler to check for this special case here, rather than at the call-sites. In cases not explicitly covered above, this function returns a DECL, OVERLOAD, or baselink representing the result of the lookup. If there was no entity with the indicated NAME, the ERROR_MARK_NODE is returned. If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword (e.g., "struct") that was used. In that case bindings that do not refer to types are ignored. If IS_TEMPLATE is TRUE, bindings that do not refer to templates are ignored. If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces are ignored. If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent types. If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a TREE_LIST of candidates if name-lookup results in an ambiguity, and NULL_TREE otherwise. 
*/

static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
		       enum tag_types tag_type,
		       bool is_template,
		       bool is_namespace,
		       bool check_dependency,
		       tree *ambiguous_decls)
{
  int flags = 0;
  tree decl;
  tree object_type = parser->context->object_type;

  /* Only complain from lookup_name_real when we are committed to this
     parse; during a tentative parse diagnostics would be spurious.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    flags |= LOOKUP_COMPLAIN;

  /* Assume that the lookup will be unambiguous.  */
  if (ambiguous_decls)
    *ambiguous_decls = NULL_TREE;

  /* Now that we have looked up the name, the OBJECT_TYPE (if any) is
     no longer valid.  Note that if we are parsing tentatively, and
     the parse fails, OBJECT_TYPE will be automatically restored.  */
  parser->context->object_type = NULL_TREE;

  if (name == error_mark_node)
    return error_mark_node;

  /* A template-id has already been resolved; there is no lookup to
     do.  */
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    return name;
  if (BASELINK_P (name))
    {
      gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
		  == TEMPLATE_ID_EXPR);
      return name;
    }

  /* A BIT_NOT_EXPR is used to represent a destructor.  By this point,
     it should already have been checked to make sure that the name
     used matches the type being destroyed.  */
  if (TREE_CODE (name) == BIT_NOT_EXPR)
    {
      tree type;

      /* Figure out to which type this destructor applies.  */
      if (parser->scope)
	type = parser->scope;
      else if (object_type)
	type = object_type;
      else
	type = current_class_type;
      /* If that's not a class type, there is no destructor.  */
      if (!type || !CLASS_TYPE_P (type))
	return error_mark_node;
      /* Force the implicit destructor into existence before asking
	 for it.  */
      if (CLASSTYPE_LAZY_DESTRUCTOR (type))
	lazily_declare_fn (sfk_destructor, type);
      if (!CLASSTYPE_DESTRUCTORS (type))
	return error_mark_node;
      /* If it was a class type, return the destructor.  */
      return CLASSTYPE_DESTRUCTORS (type);
    }

  /* By this point, the NAME should be an ordinary identifier.  If
     the id-expression was a qualified name, the qualifying scope is
     stored in PARSER->SCOPE at this point.  */
  gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE);

  /* Perform the lookup.  Three cases: qualified name (PARSER->SCOPE
     set), member access expression (OBJECT_TYPE set), or unqualified
     name.  */
  if (parser->scope)
    {
      bool dependent_p;

      if (parser->scope == error_mark_node)
	return error_mark_node;

      /* If the SCOPE is dependent, the lookup must be deferred until
	 the template is instantiated -- unless we are explicitly
	 looking up names in uninstantiated templates.  Even then, we
	 cannot look up the name if the scope is not a class type; it
	 might, for example, be a template type parameter.  */
      dependent_p = (TYPE_P (parser->scope)
		     && !(parser->in_declarator_p
			  && currently_open_class (parser->scope))
		     && dependent_type_p (parser->scope));
      if ((check_dependency || !CLASS_TYPE_P (parser->scope))
	   && dependent_p)
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      else
	{
	  tree pushed_scope = NULL_TREE;

	  /* If PARSER->SCOPE is a dependent type, then it must be a
	     class type, and we must not be checking dependencies;
	     otherwise, we would have processed this lookup above.  So
	     that PARSER->SCOPE is not considered a dependent base by
	     lookup_member, we must enter the scope here.  */
	  if (dependent_p)
	    pushed_scope = push_scope (parser->scope);
	  /* If the PARSER->SCOPE is a template specialization, it
	     may be instantiated during name lookup.  In that case,
	     errors may be issued.  Even if we rollback the current
	     tentative parse, those errors are valid.  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);
	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      tree object_decl = NULL_TREE;
      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	object_decl = lookup_member (object_type,
				     name,
				     /*protect=*/0,
				     tag_type != none_type);
      /* Look it up in the enclosing context, too.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
      /* The member binding, if any, takes precedence over the
	 enclosing-context binding.  */
      if (object_decl)
	decl = object_decl;
    }
  else
    {
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, flags);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error ("reference to %qD is ambiguous", name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  return decl;
}

/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.  */

static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name)
{
  return cp_parser_lookup_name (parser, name,
				none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				/*check_dependency=*/true,
				/*ambiguous_decls=*/NULL);
}

/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.  If TAG_NAME_P is
   true, the DECL indicates the class being defined in a class-head,
   or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */

static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* If the TEMPLATE_DECL is being declared as part of a class-head,
     the translation from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such case.  */

  if (DECL_CLASS_TEMPLATE_P (decl) && tag_name_p)
    return DECL_TEMPLATE_RESULT (decl);

  return decl;
}

/* If too many, or too few, template-parameter lists apply to the
   declarator, issue an error message.  Returns TRUE if all went well,
   and FALSE otherwise.
*/

static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
						cp_declarator *declarator)
{
  unsigned num_templates;

  /* We haven't seen any classes that involve template parameters yet.  */
  num_templates = 0;

  switch (declarator->kind)
    {
    case cdk_id:
      /* For a qualified id, count one `template <...>' header for
	 every enclosing class that is a primary template; the total
	 is then validated against the headers actually seen.  */
      if (declarator->u.id.qualifying_scope)
	{
	  tree scope;
	  tree member;

	  scope = declarator->u.id.qualifying_scope;
	  member = declarator->u.id.unqualified_name;

	  while (scope && CLASS_TYPE_P (scope))
	    {
	      /* You're supposed to have one `template <...>' for every
		 template class, but you don't need one for a full
		 specialization.  For example:

		   template <class T> struct S{};
		   template <> struct S<int> { void f(); };
		   void S<int>::f () {}

		 is correct; there shouldn't be a `template <>' for
		 the definition of `S<int>::f'.  */
	      if (!CLASSTYPE_TEMPLATE_INFO (scope))
		/* If SCOPE does not have template information of any
		   kind, then it is not a template, nor is it nested
		   within a template.  */
		break;
	      if (explicit_class_specialization_p (scope))
		break;
	      if (PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)))
		++num_templates;

	      scope = TYPE_CONTEXT (scope);
	    }
	}
      else if (TREE_CODE (declarator->u.id.unqualified_name)
	       == TEMPLATE_ID_EXPR)
	/* If the DECLARATOR has the form `X<y>' then it uses one
	   additional level of template parameters.  */
	++num_templates;

      return cp_parser_check_template_parameters (parser,
						  num_templates);

    case cdk_function:
    case cdk_array:
    case cdk_pointer:
    case cdk_reference:
    case cdk_ptrmem:
    /* APPLE LOCAL blocks 6040305 */
    case cdk_block_pointer:
      /* Peel off the wrapping declarator and check the inner one.  */
      return (cp_parser_check_declarator_template_parameters
	      (parser, declarator->declarator));

    case cdk_error:
      return true;

    default:
      gcc_unreachable ();
    }
  return false;
}

/* NUM_TEMPLATES were used in the current declaration.  If that is
   invalid, return FALSE and issue an error message.  Otherwise,
   return TRUE.
*/

static bool
cp_parser_check_template_parameters (cp_parser* parser,
				     unsigned num_templates)
{
  /* If there are more template classes than parameter lists, we have
     something like:

       template <class T> void S<T>::R<T>::f ();  */
  if (parser->num_template_parameter_lists < num_templates)
    {
      error ("too few template-parameter-lists");
      return false;
    }
  /* If there are the same number of template classes and parameter
     lists, that's OK.  */
  if (parser->num_template_parameter_lists == num_templates)
    return true;
  /* If there are more, but only one more, then we are referring to a
     member template.  That's OK too.  */
  if (parser->num_template_parameter_lists == num_templates + 1)
      return true;
  /* Otherwise, there are too many template parameter lists.  We have
     something like:

     template <class T> template <class U> void S::f();  */
  error ("too many template-parameter-lists");
  return false;
}

/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE.  Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left
   alone.

   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */

static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If we're looking at a `::' token then we're starting from the
     global namespace, not our current location.  */
  if (token->type == CPP_SCOPE)
    {
      /* Consume the `::' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Set the SCOPE so that we know where to start the lookup.  */
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;

      return parser->scope;
    }
  else if (!current_scope_valid_p)
    {
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  return NULL_TREE;
}

/* Returns TRUE if the upcoming token sequence is the start of a
   constructor declarator.  If FRIEND_P is true, the declarator is
   preceded by the `friend' specifier.  The parse is entirely
   tentative; no tokens are consumed by the time this returns.  */

static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
  bool constructor_p;
  tree type_decl = NULL_TREE;
  bool nested_name_p;
  cp_token *next_token;

  /* The common case is that this is not a constructor declarator, so
     try to avoid doing lots of work if at all possible.  It's not
     valid to declare a constructor at function scope.  */
  if (parser->in_function_body)
    return false;
  /* And only certain tokens can begin a constructor declarator.  */
  next_token = cp_lexer_peek_token (parser->lexer);
  if (next_token->type != CPP_NAME
      && next_token->type != CPP_SCOPE
      && next_token->type != CPP_NESTED_NAME_SPECIFIER
      && next_token->type != CPP_TEMPLATE_ID)
    return false;

  /* Parse tentatively; we are going to roll back all of the tokens
     consumed here.  */
  cp_parser_parse_tentatively (parser);
  /* Assume that we are looking at a constructor declarator.  */
  constructor_p = true;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  */
  nested_name_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/false,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Outside of a class-specifier, there must be a
     nested-name-specifier.  */
  if (!nested_name_p &&
      (!at_class_scope_p () || !TYPE_BEING_DEFINED (current_class_type)
       || friend_p))
    constructor_p = false;
  /* If we still think that this might be a constructor-declarator,
     look for a class-name.  */
  if (constructor_p)
    {
      /* If we have:

	   template <typename T> struct S { S(); };
	   template <typename T> S<T>::S ();

	 we must recognize that the nested `S' names a class.
	 Similarly, for:

	   template <typename T> S<T>::S<T> ();

	 we must recognize that the nested `S' names a template.  */
      type_decl = cp_parser_class_name (parser,
					/*typename_keyword_p=*/false,
					/*template_keyword_p=*/false,
					none_type,
					/*check_dependency_p=*/false,
					/*class_head_p=*/false,
					/*is_declaration=*/false);
      /* If there was no class-name, then this is not a constructor.  */
      constructor_p = !cp_parser_error_occurred (parser);
    }

  /* If we're still considering a constructor, we have to see a `(',
     to begin the parameter-declaration-clause, followed by either a
     `)', an `...', or a decl-specifier.  We need to check for a
     type-specifier to avoid being fooled into thinking that:

       S::S (f) (int);

     is a constructor.  (It is actually a function named `f' that
     takes one parameter (of type `int') and returns a value of type
     `S::S'.  */
  if (constructor_p
      && cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
	  /* A parameter declaration begins with a decl-specifier,
	     which is either the "attribute" keyword, a storage class
	     specifier, or (usually) a type-specifier.  */
	  && !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
	{
	  tree type;
	  tree pushed_scope = NULL_TREE;
	  unsigned saved_num_template_parameter_lists;

	  /* Names appearing in the type-specifier should be looked up
	     in the scope of the class.  */
	  if (current_class_type)
	    type = NULL_TREE;
	  else
	    {
	      type = TREE_TYPE (type_decl);
	      if (TREE_CODE (type) == TYPENAME_TYPE)
		{
		  type = resolve_typename_type (type,
						/*only_current_p=*/false);
		  if (type == error_mark_node)
		    {
		      cp_parser_abort_tentative_parse (parser);
		      return false;
		    }
		}
	      pushed_scope = push_scope (type);
	    }

	  /* Inside the constructor parameter list, surrounding
	     template-parameter-lists do not apply.  */
	  saved_num_template_parameter_lists
	    = parser->num_template_parameter_lists;
	  parser->num_template_parameter_lists = 0;

	  /* Look for the type-specifier.  */
	  cp_parser_type_specifier (parser,
				    CP_PARSER_FLAGS_NONE,
				    /*decl_specs=*/NULL,
				    /*is_declarator=*/true,
				    /*declares_class_or_enum=*/NULL,
				    /*is_cv_qualifier=*/NULL);

	  parser->num_template_parameter_lists
	    = saved_num_template_parameter_lists;

	  /* Leave the scope of the class.  */
	  if (pushed_scope)
	    pop_scope (pushed_scope);

	  constructor_p = !cp_parser_error_occurred (parser);
	}
    }
  else
    constructor_p = false;
  /* We did not really want to consume any tokens.  */
  cp_parser_abort_tentative_parse (parser);

  return constructor_p;
}

/* Parse the definition of the function given by the DECL_SPECIFIERS,
   ATTRIBUTES, and DECLARATOR.  The access checks have been deferred;
   they must be performed once we are in the scope of the function.

   Returns the function defined.  */

static tree
cp_parser_function_definition_from_specifiers_and_declarator
  (cp_parser* parser,
   cp_decl_specifier_seq *decl_specifiers,
   tree attributes,
   const cp_declarator *declarator)
{
  tree fn;
  bool success_p;

  /* Begin the function-definition.  */
  success_p = start_function (decl_specifiers, declarator, attributes);

  /* The things we're about to see are not directly qualified by any
     template headers we've seen thus far.  */
  reset_specialization ();

  /* If there were names looked up in the decl-specifier-seq that we
     did not check, check them now.  We must wait until we are in the
     scope of the function to perform the checks, since the function
     might be a friend.  */
  perform_deferred_access_checks ();

  if (!success_p)
    {
      /* Skip the entire function.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = error_mark_node;
    }
  /* APPLE LOCAL begin mainline 2006-12-02 5128086 */ \
  else if (DECL_INITIAL (current_function_decl) != error_mark_node)
    {
      /* Seen already, skip it.  An error message has already been
	 output.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      fn = current_function_decl;
      current_function_decl = NULL_TREE;
      /* If this is a function from a class, pop the nested class.  */
      if (current_class_name)
	pop_nested_class ();
    }
  /* APPLE LOCAL end mainline 2006-12-02 5128086 */ \
  else
    fn = cp_parser_function_definition_after_declarator (parser,
							 /*inline_p=*/false);

  return fn;
}

/* Parse the part of a function-definition that follows the
   declarator.  INLINE_P is TRUE iff this function is an inline
   function defined with a class-specifier.

   Returns the function defined.  */

static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
						bool inline_p)
{
  tree fn;
  bool ctor_initializer_p = false;
  bool saved_in_unbraced_linkage_specification_p;
  bool saved_in_function_body;
  unsigned saved_num_template_parameter_lists;

  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = true;
  /* If the next token is `return', then the code may be trying to
     make use of the "named return value" extension that G++ used to
     support.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
    {
      /* Consume the `return' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the identifier that indicates what value is to be
	 returned.  */
      cp_parser_identifier (parser);
      /* Issue an error message.  */
      error ("named return values are no longer supported");
      /* Skip tokens until we reach the start of the function body.  */
      while (true)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_OPEN_BRACE
	      || token->type == CPP_EOF
	      || token->type == CPP_PRAGMA_EOL)
	    break;
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  /* The `extern' in `extern "C" void f () { ... }' does not apply to
     anything declared inside `f'.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Inside the function, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;
  /* If the next token is `try', then we are looking at a
     function-try-block.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  /* A function-try-block includes the function-body, so we only do
     this next part if we're not processing a function-try-block.  */
  else
    ctor_initializer_p
      = cp_parser_ctor_initializer_opt_and_function_body (parser);

  /* Finish the function.  */
  fn = finish_function ((ctor_initializer_p ? 1 : 0)
			| (inline_p ? 2 : 0));
  /* Generate code for it, if necessary.  */
  expand_or_defer_fn (fn);
  /* Restore the saved values.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_function_body = saved_in_function_body;

  return fn;
}

/* Parse a template-declaration, assuming that the `export' (and
   `extern') keywords, if present, has already been scanned.  MEMBER_P
   is as for cp_parser_template_declaration.  */

static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
  tree decl = NULL_TREE;
  VEC (deferred_access_check,gc) *checks;
  tree parameter_list;
  bool friend_p = false;
  bool need_lang_pop;

  /* Look for the `template' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_TEMPLATE, "`template'"))
    return;

  /* And the `<'.  */
  if (!cp_parser_require (parser, CPP_LESS, "`<'"))
    return;
  if (at_class_scope_p () && current_function_decl)
    {
      /* 14.5.2.2 [temp.mem]

	 A local class shall not have member templates.  */
      error ("invalid declaration of member template in local class");
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* [temp]

     A template ... shall not have C linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error ("template with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;

  /* We cannot perform access checks on the template parameter
     declarations until we know what is being declared, just as we
     cannot check the decl-specifier list.  */
  push_deferring_access_checks (dk_deferred);

  /* If the next token is `>', then we have an invalid
     specialization.  Rather than complain about an invalid template
     parameter, issue an error message here.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    {
      cp_parser_error (parser, "invalid explicit specialization");
      begin_specialization ();
      parameter_list = NULL_TREE;
    }
  else
    /* Parse the template parameters.  */
    parameter_list = cp_parser_template_parameter_list (parser);

  /* Get the deferred access checks from the parameter list.  These
     will be checked once we know what is being declared, as for a
     member template the checks must be performed in the scope of the
     class containing the member.  */
  checks = get_deferred_access_checks ();

  /* Look for the `>'.  */
  cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* We just processed one more parameter list.  */
  ++parser->num_template_parameter_lists;
  /* If the next token is `template', there are more template
     parameters.  Handle `template <...> template <...>' by
     recursing.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer,
				      RID_TEMPLATE))
    cp_parser_template_declaration_after_export (parser, member_p);
  else
    {
      /* There are no access checks when parsing a template, as we do
	 not know if a specialization will be a friend.  */
      push_deferring_access_checks (dk_no_check);
      decl = cp_parser_single_declaration (parser,
					   checks,
					   member_p,
					   &friend_p);
      pop_deferring_access_checks ();

      /* If this is a member template declaration, let the front
	 end know.  */
      if (member_p && !friend_p && decl)
	{
	  if (TREE_CODE (decl) == TYPE_DECL)
	    cp_parser_check_access_in_redeclaration (decl);

	  decl = finish_member_template_decl (decl);
	}
      else if (friend_p && decl && TREE_CODE (decl) == TYPE_DECL)
	make_friend_class (current_class_type, TREE_TYPE (decl),
			   /*complain=*/true);
    }
  /* We are done with the current parameter list.  */
  --parser->num_template_parameter_lists;

  pop_deferring_access_checks ();

  /* Finish up.  */
  finish_template_decl (parameter_list);

  /* Register member declarations.  */
  if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
    finish_member_declaration (decl);
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* If DECL is a function template, we must return to parse it later.
     (Even though there is no definition, there might be default
     arguments that need handling.)  */
  if (member_p && decl
      && (TREE_CODE (decl) == FUNCTION_DECL
	  || DECL_FUNCTION_TEMPLATE_P (decl)))
    TREE_VALUE (parser->unparsed_functions_queues)
      = tree_cons (NULL_TREE, decl,
		   TREE_VALUE (parser->unparsed_functions_queues));
}

/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  */

static void
cp_parser_perform_template_parameter_access_checks (VEC (deferred_access_check,gc)* checks)
{
  ++processing_template_parmlist;
  perform_access_checks (checks);
  --processing_template_parmlist;
}

/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence.  If MEMBER_P is true, this
   declaration appears in a class scope.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.
*/

static tree
cp_parser_single_declaration (cp_parser* parser,
			      VEC (deferred_access_check,gc)* checks,
			      bool member_p,
			      bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;

  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
	      || innermost_scope_kind () == sk_template_spec);

  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);

  /* There are no template typedefs.  */
  if (decl_specifiers.specs[(int) ds_typedef])
    {
      error ("template declaration of %qs", "typedef");
      decl = error_mark_node;
    }

  /* Gather up the access checks that occurred in the
     decl-specifier-seq.  */
  stop_deferring_access_checks ();

  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
	{
	  decl = shadow_tag (&decl_specifiers);

	  /* In this case:

	       struct C {
		 friend template <typename T> struct A<T>::B;
	       };

	     A<T>::B will be represented by a TYPENAME_TYPE, and
	     therefore not recognized by shadow_tag.  */
	  if (friend_p && *friend_p
	      && !decl
	      && decl_specifiers.type
	      && TYPE_P (decl_specifiers.type))
	    decl = decl_specifiers.type;

	  if (decl && decl != error_mark_node)
	    decl = TYPE_NAME (decl);
	  else
	    decl = error_mark_node;

	  /* Perform access checks for template parameters.  */
	  cp_parser_perform_template_parameter_access_checks (checks);
	}
    }
  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing
     declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
	  || decl_specifiers.type != error_mark_node))
    decl = cp_parser_init_declarator (parser,
				      &decl_specifiers,
				      checks,
				      /*function_definition_allowed_p=*/true,
				      member_p,
				      declares_class_or_enum,
				      &function_definition_p);

  pop_deferring_access_checks ();

  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  /* Look for a trailing `;' after the declaration, unless we just
     parsed a function-definition (whose body ends without one).  */
  if (!function_definition_p
      && (decl == error_mark_node
	  || !cp_parser_require (parser, CPP_SEMICOLON, "`;'")))
    cp_parser_skip_to_end_of_block_or_statement (parser);

  return decl;
}

/* Parse a cast-expression that is not the operand of a unary "&".  */

static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  return cp_parser_cast_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false);
}

/* Parse a functional cast to TYPE.  Returns an expression
   representing the cast.  */

static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  tree expression_list;
  tree cast;

  expression_list
    = cp_parser_parenthesized_expression_list (parser, false,
					       /*cast_p=*/true,
					       /*non_constant_p=*/NULL);

  cast = build_functional_cast (type, expression_list);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".  */
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  if (cast != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && (cp_parser_non_integral_constant_expression
	  (parser, "a call to a constructor")))
    return error_mark_node;
  return cast;
}

/* Save the tokens that make up the body of a member function defined
   in a class-specifier.  The DECL_SPECIFIERS and DECLARATOR have
   already been parsed.
The ATTRIBUTES are any GNU "__attribute__" specifiers applied to
   the declaration.  Returns the FUNCTION_DECL for the member function.  */

static tree
cp_parser_save_member_function_body (cp_parser* parser,
				     cp_decl_specifier_seq *decl_specifiers,
				     cp_declarator *declarator,
				     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;

  /* Create the function-declaration.  */
  fn = start_method (decl_specifiers, declarator, attributes);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it so the caller resumes at a
	 sane point.  */
      if (cp_parser_token_starts_function_definition_p
	  (cp_lexer_peek_token (parser->lexer)))
	cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* Remember it, if there are default args to post process.  */
  cp_parser_save_default_args (parser, fn);

  /* Save away the tokens that make up the body of the function;
     FIRST/LAST delimit the cached range rather than parsing it now.  */
  first = parser->lexer->next_token;
  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  /* Handle function try blocks: cache each trailing `catch' group
     too.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH))
    cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  last = parser->lexer->next_token;

  /* Save away the inline definition; we will process it when the
     class is complete.  */
  DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last);
  DECL_PENDING_INLINE_P (fn) = 1;

  /* We need to know that this was defined in the class, so that
     friend templates are handled correctly.  */
  DECL_INITIALIZED_IN_CLASS_P (fn) = 1;

  /* We're done with the inline definition.  */
  finish_method (fn);

  /* Add FN to the queue of functions to be parsed later, once the
     enclosing class is complete.  */
  TREE_VALUE (parser->unparsed_functions_queues)
    = tree_cons (NULL_TREE, fn,
		 TREE_VALUE (parser->unparsed_functions_queues));

  return fn;
}

/* Parse a template-argument-list, as well as the trailing ">" (but
   not the opening ">").  See cp_parser_template_argument_list for the
   return value.
*/ static tree cp_parser_enclosed_template_argument_list (cp_parser* parser) { tree arguments; tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; bool saved_greater_than_is_operator_p; bool saved_skip_evaluation; /* [temp.names] When parsing a template-id, the first non-nested `>' is taken as the end of the template-argument-list rather than a greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = false; /* Parsing the argument list may modify SCOPE, so we save it here. */ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* We need to evaluate the template arguments, even though this template-id may be nested within a "sizeof". */ saved_skip_evaluation = skip_evaluation; skip_evaluation = false; /* Parse the template-argument-list itself. */ if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER)) arguments = NULL_TREE; else arguments = cp_parser_template_argument_list (parser); /* Look for the `>' that ends the template-argument-list. If we find a '>>' instead, it's probably just a typo. */ if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) { if (!saved_greater_than_is_operator_p) { /* If we're in a nested template argument list, the '>>' has to be a typo for '> >'. We emit the error message, but we continue parsing and we push a '>' as next token, so that the argument list will be parsed correctly. Note that the global source location is still on the token before the '>>', so we need to say explicitly where we want it. */ cp_token *token = cp_lexer_peek_token (parser->lexer); error ("%H%<>>%> should be %<> >%> " "within a nested template argument list", &token->location); /* ??? Proper recovery should terminate two levels of template argument list here. */ token->type = CPP_GREATER; } else { /* If this is not a nested template argument list, the '>>' is a typo for '>'. 
Emit an error message and continue. Same deal about the token location, but here we can get it right by consuming the '>>' before issuing the diagnostic. */ cp_lexer_consume_token (parser->lexer); error ("spurious %<>>%>, use %<>%> to terminate " "a template argument list"); } } else cp_parser_skip_to_end_of_template_parameter_list (parser); /* The `>' token might be a greater-than operator again now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Restore the SAVED_SCOPE. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; skip_evaluation = saved_skip_evaluation; return arguments; } /* MEMBER_FUNCTION is a member function, or a friend. If default arguments, or the body of the function have not yet been parsed, parse them now. */ static void cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function) { /* If this member is a template, get the underlying FUNCTION_DECL. */ if (DECL_FUNCTION_TEMPLATE_P (member_function)) member_function = DECL_TEMPLATE_RESULT (member_function); /* There should not be any class definitions in progress at this point; the bodies of members are only parsed outside of all class definitions. */ gcc_assert (parser->num_classes_being_defined == 0); /* While we're parsing the member functions we might encounter more classes. We want to handle them right away, but we don't want them getting mixed up with functions that are currently in the queue. */ parser->unparsed_functions_queues = tree_cons (NULL_TREE, NULL_TREE, parser->unparsed_functions_queues); /* Make sure that any template parameters are in scope. */ maybe_begin_member_template_processing (member_function); /* If the body of the function has not yet been parsed, parse it now. */ if (DECL_PENDING_INLINE_P (member_function)) { tree function_scope; cp_token_cache *tokens; /* The function is no longer pending; we are processing it. 
*/ tokens = DECL_PENDING_INLINE_INFO (member_function); DECL_PENDING_INLINE_INFO (member_function) = NULL; DECL_PENDING_INLINE_P (member_function) = 0; /* If this is a local class, enter the scope of the containing function. */ function_scope = current_function_decl; if (function_scope) push_function_context_to (function_scope); /* Push the body of the function onto the lexer stack. */ cp_parser_push_lexer_for_tokens (parser, tokens); /* Let the front end know that we going to be defining this function. */ start_preparsed_function (member_function, NULL_TREE, SF_PRE_PARSED | SF_INCLASS_INLINE); /* Don't do access checking if it is a templated function. */ if (processing_template_decl) push_deferring_access_checks (dk_no_check); /* Now, parse the body of the function. */ cp_parser_function_definition_after_declarator (parser, /*inline_p=*/true); if (processing_template_decl) pop_deferring_access_checks (); /* Leave the scope of the containing function. */ if (function_scope) pop_function_context_from (function_scope); cp_parser_pop_lexer (parser); } /* Remove any template parameters from the symbol table. */ maybe_end_member_template_processing (); /* Restore the queue. */ parser->unparsed_functions_queues = TREE_CHAIN (parser->unparsed_functions_queues); } /* If DECL contains any default args, remember it on the unparsed functions queue. */ static void cp_parser_save_default_args (cp_parser* parser, tree decl) { tree probe; for (probe = TYPE_ARG_TYPES (TREE_TYPE (decl)); probe; probe = TREE_CHAIN (probe)) if (TREE_PURPOSE (probe)) { TREE_PURPOSE (parser->unparsed_functions_queues) = tree_cons (current_class_type, decl, TREE_PURPOSE (parser->unparsed_functions_queues)); break; } } /* FN is a FUNCTION_DECL which may contains a parameter with an unparsed DEFAULT_ARG. Parse the default args now. This function assumes that the current scope is the scope in which the default argument should be processed. 
*/ static void cp_parser_late_parsing_default_args (cp_parser *parser, tree fn) { bool saved_local_variables_forbidden_p; tree parm; /* While we're parsing the default args, we might (due to the statement expression extension) encounter more classes. We want to handle them right away, but we don't want them getting mixed up with default args that are currently in the queue. */ parser->unparsed_functions_queues = tree_cons (NULL_TREE, NULL_TREE, parser->unparsed_functions_queues); /* Local variable names (and the `this' keyword) may not appear in a default argument. */ saved_local_variables_forbidden_p = parser->local_variables_forbidden_p; parser->local_variables_forbidden_p = true; for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)); parm; parm = TREE_CHAIN (parm)) { cp_token_cache *tokens; tree default_arg = TREE_PURPOSE (parm); tree parsed_arg; VEC(tree,gc) *insts; tree copy; unsigned ix; if (!default_arg) continue; if (TREE_CODE (default_arg) != DEFAULT_ARG) /* This can happen for a friend declaration for a function already declared with default arguments. */ continue; /* Push the saved tokens for the default argument onto the parser's lexer stack. */ tokens = DEFARG_TOKENS (default_arg); cp_parser_push_lexer_for_tokens (parser, tokens); /* Parse the assignment-expression. */ parsed_arg = cp_parser_assignment_expression (parser, /*cast_p=*/false); if (!processing_template_decl) parsed_arg = check_default_argument (TREE_VALUE (parm), parsed_arg); TREE_PURPOSE (parm) = parsed_arg; /* Update any instantiations we've already created. */ for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0; VEC_iterate (tree, insts, ix, copy); ix++) TREE_PURPOSE (copy) = parsed_arg; /* If the token stream has not been completely used up, then there was extra junk after the end of the default argument. */ if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF)) cp_parser_error (parser, "expected %<,%>"); /* Revert to the main lexer. 
*/ cp_parser_pop_lexer (parser); } /* Make sure no default arg is missing. */ check_default_args (fn); /* Restore the state of local_variables_forbidden_p. */ parser->local_variables_forbidden_p = saved_local_variables_forbidden_p; /* Restore the queue. */ parser->unparsed_functions_queues = TREE_CHAIN (parser->unparsed_functions_queues); } /* Parse the operand of `sizeof' (or a similar operator). Returns either a TYPE or an expression, depending on the form of the input. The KEYWORD indicates which kind of expression we have encountered. */ static tree cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword) { static const char *format; tree expr = NULL_TREE; const char *saved_message; bool saved_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; /* Initialize FORMAT the first time we get here. */ if (!format) format = "types may not be defined in '%s' expressions"; /* Types cannot be defined in a `sizeof' expression. Save away the old message. */ saved_message = parser->type_definition_forbidden_message; /* And create the new one. */ parser->type_definition_forbidden_message = XNEWVEC (const char, strlen (format) + strlen (IDENTIFIER_POINTER (ridpointers[keyword])) + 1 /* `\0' */); sprintf ((char *) parser->type_definition_forbidden_message, format, IDENTIFIER_POINTER (ridpointers[keyword])); /* The restrictions on constant-expressions do not apply inside sizeof expressions. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; /* Do not actually evaluate the expression. */ ++skip_evaluation; /* If it's a `(', then we might be looking at the type-id construction. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { tree type; bool saved_in_type_id_in_expr_p; /* We can't be sure yet whether we're looking at a type-id or an expression. 
*/ cp_parser_parse_tentatively (parser); /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Parse the type-id. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Now, look for the trailing `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "%<)%>"); /* If all went well, then we're done. */ if (cp_parser_parse_definitely (parser)) { cp_decl_specifier_seq decl_specs; /* Build a trivial decl-specifier-seq. */ clear_decl_specs (&decl_specs); decl_specs.type = type; /* Call grokdeclarator to figure out what type this is. */ expr = grokdeclarator (NULL, &decl_specs, TYPENAME, /*initialized=*/0, /*attrlist=*/NULL); } } /* If the type-id production did not work out, then we must be looking at the unary-expression production. */ if (!expr) expr = cp_parser_unary_expression (parser, /*address_p=*/false, /*cast_p=*/false); /* Go back to evaluating expressions. */ --skip_evaluation; /* Free the message we created. */ free ((char *) parser->type_definition_forbidden_message); /* And restore the old one. */ parser->type_definition_forbidden_message = saved_message; parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; return expr; } /* If the current declaration has no declarator, return true. */ static bool cp_parser_declares_only_class_p (cp_parser *parser) { /* If the next token is a `;' or a `,' then there is no declarator. */ return (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_COMMA)); } /* Update the DECL_SPECS to reflect the storage class indicated by KEYWORD. 
*/

static void
cp_parser_set_storage_class (cp_parser *parser,
			     cp_decl_specifier_seq *decl_specs,
			     enum rid keyword)
{
  cp_storage_class storage_class;

  /* Storage classes are not permitted in an unbraced
     linkage-specification (e.g. `extern "C" static int i;').  */
  if (parser->in_unbraced_linkage_specification_p)
    {
      error ("invalid use of %qD in linkage specification",
	     ridpointers[keyword]);
      return;
    }
  else if (decl_specs->storage_class != sc_none)
    {
      /* A second storage class: flag it; the error is issued later
	 from grokdeclarator.  */
      decl_specs->conflicting_specifiers_p = true;
      return;
    }

  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_specs->specs[(int) ds_thread])
    {
      /* `__thread' must follow, not precede, `extern'/`static'.  */
      error ("%<__thread%> before %qD", ridpointers[keyword]);
      decl_specs->specs[(int) ds_thread] = 0;
    }

  /* Map the keyword to the internal storage-class enumerator.  */
  switch (keyword)
    {
    case RID_AUTO:
      storage_class = sc_auto;
      break;
    case RID_REGISTER:
      storage_class = sc_register;
      break;
    case RID_STATIC:
      storage_class = sc_static;
      break;
    case RID_EXTERN:
      storage_class = sc_extern;
      break;
    case RID_MUTABLE:
      storage_class = sc_mutable;
      break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = storage_class;

  /* A storage class specifier cannot be applied alongside a typedef
     specifier. If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator. */
  if (decl_specs->specs[(int)ds_typedef])
    decl_specs->conflicting_specifiers_p = true;
}

/* Update the DECL_SPECS to reflect the TYPE_SPEC.  If USER_DEFINED_P
   is true, the type is a user-defined type; otherwise it is a
   built-in type specified by a keyword.  */

static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
			      tree type_spec,
			      bool user_defined_p)
{
  decl_specs->any_specifiers_p = true;

  /* If the user tries to redeclare bool or wchar_t (with, for
     example, in "typedef int wchar_t;") we remember that this is what
     happened.  In system headers, we ignore these declarations so
     that G++ can work with system headers that are not C++-safe.
*/ if (decl_specs->specs[(int) ds_typedef] && !user_defined_p && (type_spec == boolean_type_node || type_spec == wchar_type_node) && (decl_specs->type || decl_specs->specs[(int) ds_long] || decl_specs->specs[(int) ds_short] || decl_specs->specs[(int) ds_unsigned] || decl_specs->specs[(int) ds_signed])) { decl_specs->redefined_builtin_type = type_spec; if (!decl_specs->type) { decl_specs->type = type_spec; decl_specs->user_defined_type_p = false; } } else if (decl_specs->type) decl_specs->multiple_types_p = true; else { decl_specs->type = type_spec; decl_specs->user_defined_type_p = user_defined_p; decl_specs->redefined_builtin_type = NULL_TREE; } } /* DECL_SPECIFIERS is the representation of a decl-specifier-seq. Returns TRUE iff `friend' appears among the DECL_SPECIFIERS. */ static bool cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers) { return decl_specifiers->specs[(int) ds_friend] != 0; } /* If the next token is of the indicated TYPE, consume it. Otherwise, issue an error message indicating that TOKEN_DESC was expected. Returns the token consumed, if the token had the appropriate type. Otherwise, returns NULL. */ static cp_token * cp_parser_require (cp_parser* parser, enum cpp_ttype type, const char* token_desc) { if (cp_lexer_next_token_is (parser->lexer, type)) return cp_lexer_consume_token (parser->lexer); else { /* Output the MESSAGE -- unless we're parsing tentatively. */ if (!cp_parser_simulate_error (parser)) { char *message = concat ("expected ", token_desc, NULL); cp_parser_error (parser, message); free (message); } return NULL; } } /* An error message is produced if the next token is not '>'. All further tokens are skipped until the desired token is found or '{', '}', ';' or an unbalanced ')' or ']'. */ static void cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser) { /* Current level of '< ... >'. */ unsigned level = 0; /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'. 
*/ unsigned nesting_depth = 0; /* Are we ready, yet? If not, issue error message. */ if (cp_parser_require (parser, CPP_GREATER, "%<>%>")) return; /* Skip tokens until the desired token is found. */ while (true) { /* Peek at the next token. */ switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_LESS: if (!nesting_depth) ++level; break; case CPP_GREATER: if (!nesting_depth && level-- == 0) { /* We've reached the token we want, consume it and stop. */ cp_lexer_consume_token (parser->lexer); return; } break; case CPP_OPEN_PAREN: case CPP_OPEN_SQUARE: ++nesting_depth; break; case CPP_CLOSE_PAREN: case CPP_CLOSE_SQUARE: if (nesting_depth-- == 0) return; break; case CPP_EOF: case CPP_PRAGMA_EOL: case CPP_SEMICOLON: case CPP_OPEN_BRACE: case CPP_CLOSE_BRACE: /* The '>' was probably forgotten, don't look further. */ return; default: break; } /* Consume this token. */ cp_lexer_consume_token (parser->lexer); } } /* If the next token is the indicated keyword, consume it. Otherwise, issue an error message indicating that TOKEN_DESC was expected. Returns the token consumed, if the token had the appropriate type. Otherwise, returns NULL. */ static cp_token * cp_parser_require_keyword (cp_parser* parser, enum rid keyword, const char* token_desc) { cp_token *token = cp_parser_require (parser, CPP_KEYWORD, token_desc); if (token && token->keyword != keyword) { dyn_string_t error_msg; /* Format the error message. */ error_msg = dyn_string_new (0); dyn_string_append_cstr (error_msg, "expected "); dyn_string_append_cstr (error_msg, token_desc); cp_parser_error (parser, error_msg->s); dyn_string_delete (error_msg); return NULL; } return token; } /* Returns TRUE iff TOKEN is a token that can begin the body of a function-definition. */ static bool cp_parser_token_starts_function_definition_p (cp_token* token) { return (/* An ordinary function-body begins with an `{'. */ token->type == CPP_OPEN_BRACE /* A ctor-initializer begins with a `:'. 
*/ || token->type == CPP_COLON /* A function-try-block begins with `try'. */ || token->keyword == RID_TRY /* The named return value extension begins with `return'. */ || token->keyword == RID_RETURN); } /* Returns TRUE iff the next token is the ":" or "{" beginning a class definition. */ static bool cp_parser_next_token_starts_class_definition_p (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); return (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON); } /* Returns TRUE iff the next token is the "," or ">" ending a template-argument. */ static bool cp_parser_next_token_ends_template_argument_p (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); return (token->type == CPP_COMMA || token->type == CPP_GREATER); } /* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the (n+1)-th is a ":" (which is a possible digraph typo for "< ::"). */ static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser, size_t n) { cp_token *token; token = cp_lexer_peek_nth_token (parser->lexer, n); if (token->type == CPP_LESS) return true; /* Check for the sequence `<::' in the original code. It would be lexed as `[:', where `[' is a digraph, and there is no whitespace before `:'. */ if (token->type == CPP_OPEN_SQUARE && token->flags & DIGRAPH) { cp_token *token2; token2 = cp_lexer_peek_nth_token (parser->lexer, n+1); if (token2->type == CPP_COLON && !(token2->flags & PREV_WHITE)) return true; } return false; } /* Returns the kind of tag indicated by TOKEN, if it is a class-key, or none_type otherwise. */ static enum tag_types cp_parser_token_is_class_key (cp_token* token) { switch (token->keyword) { case RID_CLASS: return class_type; case RID_STRUCT: return record_type; case RID_UNION: return union_type; default: return none_type; } } /* Issue an error message if the CLASS_KEY does not match the TYPE. 
*/ static void cp_parser_check_class_key (enum tag_types class_key, tree type) { if ((TREE_CODE (type) == UNION_TYPE) != (class_key == union_type)) pedwarn ("%qs tag used in naming %q#T", class_key == union_type ? "union" : class_key == record_type ? "struct" : "class", type); } /* Issue an error message if DECL is redeclared with different access than its original declaration [class.access.spec/3]. This applies to nested classes and nested class templates. [class.mem/1]. */ static void cp_parser_check_access_in_redeclaration (tree decl) { if (!CLASS_TYPE_P (TREE_TYPE (decl))) return; if ((TREE_PRIVATE (decl) != (current_access_specifier == access_private_node)) || (TREE_PROTECTED (decl) != (current_access_specifier == access_protected_node))) error ("%qD redeclared with different access", decl); } /* Look for the `template' keyword, as a syntactic disambiguator. Return TRUE iff it is present, in which case it will be consumed. */ static bool cp_parser_optional_template_keyword (cp_parser *parser) { if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE)) { /* The `template' keyword can only be used within templates; outside templates the parser can always figure out what is a template and what is not. */ if (!processing_template_decl) { error ("%<template%> (as a disambiguator) is only allowed " "within templates"); /* If this part of the token stream is rescanned, the same error message would be generated. So, we purge the token from the stream. */ cp_lexer_purge_token (parser->lexer); return false; } else { /* Consume the `template' keyword. */ cp_lexer_consume_token (parser->lexer); return true; } } return false; } /* The next token is a CPP_NESTED_NAME_SPECIFIER. Consume the token, set PARSER->SCOPE, and perform other related actions. */ static void cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser) { int i; struct tree_check *check_value; deferred_access_check *chk; VEC (deferred_access_check,gc) *checks; /* Get the stored value. 
*/ check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value; /* Perform any access checks that were deferred. */ checks = check_value->checks; if (checks) { for (i = 0 ; VEC_iterate (deferred_access_check, checks, i, chk) ; ++i) { perform_or_defer_access_check (chk->binfo, chk->decl, chk->diag_decl); } } /* Set the scope from the stored value. */ parser->scope = check_value->value; parser->qualifying_scope = check_value->qualifying_scope; parser->object_scope = NULL_TREE; } /* Consume tokens up through a non-nested END token. */ static void cp_parser_cache_group (cp_parser *parser, enum cpp_ttype end, unsigned depth) { while (true) { cp_token *token; /* Abort a parenthesized expression if we encounter a brace. */ if ((end == CPP_CLOSE_PAREN || depth == 0) && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) return; /* If we've reached the end of the file, stop. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EOF) || (end != CPP_PRAGMA_EOL && cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA_EOL))) return; /* Consume the next token. */ token = cp_lexer_consume_token (parser->lexer); /* See if it starts a new group. */ if (token->type == CPP_OPEN_BRACE) { cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1); if (depth == 0) return; } else if (token->type == CPP_OPEN_PAREN) cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1); else if (token->type == CPP_PRAGMA) cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1); else if (token->type == end) return; } } /* Begin parsing tentatively. We always save tokens while parsing tentatively so that if the tentative parsing fails we can restore the tokens. */ static void cp_parser_parse_tentatively (cp_parser* parser) { /* Enter a new parsing context. */ parser->context = cp_parser_context_new (parser->context); /* Begin saving tokens. 
*/ cp_lexer_save_tokens (parser->lexer); /* In order to avoid repetitive access control error messages, access checks are queued up until we are no longer parsing tentatively. */ push_deferring_access_checks (dk_deferred); } /* Commit to the currently active tentative parse. */ static void cp_parser_commit_to_tentative_parse (cp_parser* parser) { cp_parser_context *context; cp_lexer *lexer; /* Mark all of the levels as committed. */ lexer = parser->lexer; for (context = parser->context; context->next; context = context->next) { if (context->status == CP_PARSER_STATUS_KIND_COMMITTED) break; context->status = CP_PARSER_STATUS_KIND_COMMITTED; while (!cp_lexer_saving_tokens (lexer)) lexer = lexer->next; cp_lexer_commit_tokens (lexer); } } /* Abort the currently active tentative parse. All consumed tokens will be rolled back, and no diagnostics will be issued. */ static void cp_parser_abort_tentative_parse (cp_parser* parser) { cp_parser_simulate_error (parser); /* Now, pretend that we want to see if the construct was successfully parsed. */ cp_parser_parse_definitely (parser); } /* Stop parsing tentatively. If a parse error has occurred, restore the token stream. Otherwise, commit to the tokens we have consumed. Returns true if no error occurred; false otherwise. */ static bool cp_parser_parse_definitely (cp_parser* parser) { bool error_occurred; cp_parser_context *context; /* Remember whether or not an error occurred, since we are about to destroy that information. */ error_occurred = cp_parser_error_occurred (parser); /* Remove the topmost context from the stack. */ context = parser->context; parser->context = context->next; /* If no parse errors occurred, commit to the tentative parse. */ if (!error_occurred) { /* Commit to the tokens read tentatively, unless that was already done. 
*/ if (context->status != CP_PARSER_STATUS_KIND_COMMITTED) cp_lexer_commit_tokens (parser->lexer); pop_to_parent_deferring_access_checks (); } /* Otherwise, if errors occurred, roll back our state so that things are just as they were before we began the tentative parse. */ else { cp_lexer_rollback_tokens (parser->lexer); pop_deferring_access_checks (); } /* Add the context to the front of the free list. */ context->next = cp_parser_context_free_list; cp_parser_context_free_list = context; return !error_occurred; } /* Returns true if we are parsing tentatively and are not committed to this tentative parse. */ static bool cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser) { return (cp_parser_parsing_tentatively (parser) && parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED); } /* Returns nonzero iff an error has occurred during the most recent tentative parse. */ static bool cp_parser_error_occurred (cp_parser* parser) { return (cp_parser_parsing_tentatively (parser) && parser->context->status == CP_PARSER_STATUS_KIND_ERROR); } /* Returns nonzero if GNU extensions are allowed. */ static bool cp_parser_allow_gnu_extensions_p (cp_parser* parser) { return parser->allow_gnu_extensions_p; } /* Objective-C++ Productions */ /* APPLE LOCAL begin CW asm blocks */ /* This is the section of CW-asm-specific parsing functions. */ static tree cp_parser_iasm_compound_statement (cp_parser *parser) { tree compound_stmt; iasm_state = iasm_asm; inside_iasm_block = true; iasm_kill_regs = true; /* LLVM LOCAL */ iasm_label_counter = 0; if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'")) return error_mark_node; /* Begin the compound-statement. */ compound_stmt = begin_compound_stmt (/*has_no_scope=*/false); /* Parse an (optional) statement-seq. */ cp_parser_iasm_line_seq_opt (parser); /* Finish the compound-statement. */ finish_compound_stmt (compound_stmt); /* Consume the `}'. 
*/ cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'"); /* We're done with the block of asm. */ iasm_end_block (); iasm_state = iasm_none; return compound_stmt; } static void cp_parser_iasm_top_statement (cp_parser *parser) { tree compound_stmt; iasm_state = iasm_asm; inside_iasm_block = true; iasm_kill_regs = true; /* LLVM LOCAL */ iasm_label_counter = 0; /* Begin the compound-statement. */ compound_stmt = begin_compound_stmt (/*has_no_scope=*/false); if (!cp_lexer_iasm_bol (parser->lexer)) { /* Parse a line. */ cp_parser_iasm_line (parser); } /* Finish the compound-statement. */ finish_compound_stmt (compound_stmt); /* We're done with the block of asm. */ iasm_end_block (); iasm_state = iasm_none; } static void cp_parser_iasm_declaration_seq_opt (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME && !iasm_typename_or_reserved (token->u.value)) return; /* Scan declarations until there aren't any more. */ while (true) { /* If we're looking at a `}', then we've run out of statements. */ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE) || cp_lexer_next_token_is (parser->lexer, CPP_EOF)) break; /* Parse a declaration. */ cp_parser_simple_declaration (parser, false); /* CPP_PRAGMA is a #pragma inside a function body, which constitutes a declaration all its own. */ if (token->type == CPP_PRAGMA) cp_parser_pragma (parser, pragma_external); if (iasm_state >= iasm_decls && (cp_lexer_iasm_bol (parser->lexer) || cp_lexer_next_token_is (parser->lexer, CPP_NAME))) break; } } /* Parse an (optional) line-seq. line-seq: line line-seq [opt] line */ static void cp_parser_iasm_line_seq_opt (cp_parser* parser) { /* Scan lines of asm until there aren't any more. */ while (true) { /* If we're looking at a `}', then we've run out of lines. */ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE) || cp_lexer_next_token_is (parser->lexer, CPP_EOF)) break; /* Parse the line. 
*/ cp_parser_iasm_line (parser);
    }
}

/* Parse one line of asm; a line is simply a statement sequence.  */

static void
cp_parser_iasm_line (cp_parser* parser)
{
  cp_parser_iasm_statement_seq_opt (parser);
}

/* Skip tokens until the end of line is seen.  */

static void
cp_parser_iasm_skip_to_eol (cp_parser *parser)
{
  while (true)
    {
      cp_token *token;

      /* Do CPP_NUMBER specially to avoid errors on things like ; 1st
	 when doing MS-style asms; these token kinds are taken from the
	 raw lexer buffer instead of cp_lexer_peek_token.  */
      if ((token = parser->lexer->next_token)->type == CPP_NUMBER
	  || token->type == CPP_HASH
	  || token->type == CPP_PASTE
	  || token->type == CPP_OTHER)
	;
      else
	/* Peek at the next token.  */
	token = cp_lexer_peek_token (parser->lexer);

      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	break;
      /* If the next token starts a new line, stop.  */
      if (cp_lexer_iasm_bol (parser->lexer))
	break;
      /* Otherwise, consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* In MS-style asms a `;' begins a comment running to end of line;
   consume the `;' and everything after it on the same line.  */

static void
cp_parser_iasm_maybe_skip_comments (cp_parser *parser)
{
  if (flag_ms_asms
      && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      /* Eat the ';', then skip rest of characters on this line.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_iasm_skip_to_eol (parser);
    }
}

/* Parse an asm line.  The first token cannot be at the beginning of
   the line.  */

static void
cp_parser_iasm_statement_seq_opt (cp_parser* parser)
{
  int check;  /* Nonzero when the last iteration parsed a real
		 statement, so a separator must follow.  */

  /* Scan statements until there aren't any more.  */
  while (true)
    {
      check = 0;
      /* Semicolons divide up individual statements.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	{
	  /* ; denotes comments in MS-style asms.  */
	  if (flag_ms_asms)
	    {
	      cp_parser_iasm_maybe_skip_comments (parser);
	      return;
	    }
	  cp_lexer_consume_token (parser->lexer);
	}
      else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ASM))
	{
	  cp_lexer_consume_token (parser->lexer);
	}
      else
	{
	  /* Parse a single statement.  */
	  cp_parser_iasm_statement (parser);
	  check = 1;
	}

      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)
	  || cp_lexer_next_token_is (parser->lexer, CPP_EOF)
	  /* We parse at most, one line.
*/ || cp_lexer_iasm_bol (parser->lexer)) return; if (check && !(cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE) || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is_keyword (parser->lexer, RID_ASM) || cp_lexer_iasm_bol (parser->lexer))) { cp_parser_error (parser, "expected `;' or `}' `asm' or end-of-line"); } } if (!cp_lexer_iasm_bol (parser->lexer)) cp_parser_iasm_maybe_skip_comments (parser); } /* Build an identifier comprising the string passed and the next token. */ static tree iasm_build_identifier_string (cp_parser* parser, const char* str) { char *buf; int len; tree id; if (strcmp (str, ".") == 0 && (cp_lexer_peek_token (parser->lexer)->flags & PREV_WHITE) == 0) { if (cp_lexer_next_token_is_keyword (parser->lexer, RID_SHORT)) { cp_lexer_consume_token (parser->lexer); return get_identifier (".short"); } if (cp_lexer_next_token_is_keyword (parser->lexer, RID_LONG)) { cp_lexer_consume_token (parser->lexer); return get_identifier (".long"); } } id = cp_parser_iasm_identifier_or_number (parser); len = strlen (str); buf = (char *) alloca (IDENTIFIER_LENGTH (id) + len + 1); memcpy (buf, str, len); memcpy (buf+len, IDENTIFIER_POINTER (id), IDENTIFIER_LENGTH (id)); buf[IDENTIFIER_LENGTH (id) + len] = 0; return get_identifier (buf); } /* Parse a CW asm identifier. Returns an IDENTIFIER_NODE representing the identifier. The CW asm identifieriers include [.+-] as part of the identifier. */ static tree cp_parser_iasm_identifier (cp_parser* parser) { cp_token *token; tree t; const char *str = ""; /* We have to accept certain keywords. 
*/ token = cp_lexer_peek_token (parser->lexer); if (token->flags & NAMED_OP) { const char *s = 0; switch (token->type) { case CPP_AND_AND: s="and"; break; case CPP_AND_EQ: s="and_eq"; break; case CPP_AND: s="bitand"; break; case CPP_OR: s="bitor"; break; case CPP_COMPL: s="compl"; break; case CPP_NOT: s="not"; break; case CPP_NOT_EQ: s="not_eq"; break; case CPP_OR_OR: s="or"; break; case CPP_OR_EQ: s="or_eq"; break; case CPP_XOR: s="xor"; break; case CPP_XOR_EQ: s="xor_eq"; break; default: break; } /* The above list is the entire list of named operators. We can't fail to translate the name. See operator_array in libcpp/init.c. */ gcc_assert (s != 0); cp_lexer_consume_token (parser->lexer); t = get_identifier (s); } else if (token->type == CPP_DOT) { /* .align */ cp_lexer_consume_token (parser->lexer); t = iasm_build_identifier_string (parser, "."); } else if (token->u.value && IASM_SEE_OPCODE (TYPESPEC, token->u.value) == IDENTIFIER) { cp_lexer_consume_token (parser->lexer); t = token->u.value; } else t = cp_parser_identifier (parser); if (t == error_mark_node) return t; token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_DOT: str = "."; break; case CPP_PLUS: str = "+"; break; case CPP_MINUS: str = "-"; break; case CPP_PLUS_PLUS: str = "++"; break; case CPP_MINUS_MINUS: str = "--"; break; default: return t; } /* If there was whitespace between the identifier and the [.+-] character, then that character can't be part of the identifier. 
*/ if (token->flags & PREV_WHITE) return t; cp_lexer_consume_token (parser->lexer); return iasm_get_identifier (t, str); } static tree cp_parser_iasm_identifier_or_number (cp_parser* parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NUMBER && TREE_CODE (token->u.value) == INTEGER_CST) { char buf[60]; sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, tree_low_cst (token->u.value, 0)); cp_lexer_consume_token (parser->lexer); return get_identifier (buf); } return cp_parser_identifier (parser); } static tree cp_parser_iasm_maybe_prefix (cp_parser *parser, tree id) { tree prefix_list = NULL_TREE; while (iasm_is_prefix (id)) { if (cp_lexer_iasm_bol (parser->lexer)) break; prefix_list = tree_cons (NULL_TREE, id, prefix_list); id = cp_parser_iasm_identifier (parser); } if (prefix_list) id = tree_cons (NULL_TREE, id, prefix_list); return id; } /* A single statement consists of one or more labels (identified by a leading '@' and/or a trailing ':'), optionally followed by opcode and operands. */ static void cp_parser_iasm_statement (cp_parser* parser) { tree aname, anothername, operands; /* Keep sucking labels from the front of the statement until a non-label is seen. */ while (true) { if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE) || cp_lexer_next_token_is (parser->lexer, CPP_EOF)) break; if (cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA)) { cp_parser_pragma (parser, pragma_compound); } else if (cp_lexer_next_token_is (parser->lexer, CPP_ATSIGN)) { cp_lexer_consume_token (parser->lexer); aname = cp_parser_iasm_identifier_or_number (parser); /* Optional ':' after a label. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) cp_lexer_consume_token (parser->lexer); iasm_label (aname, true); } else { aname = cp_parser_iasm_identifier (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { cp_lexer_consume_token (parser->lexer); iasm_label (aname, false); } else { enum rid scspec = RID_EXTERN; if (strcmp (IDENTIFIER_POINTER (aname), "entry") == 0) { if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC) || cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTERN)) scspec = cp_lexer_consume_token (parser->lexer)->keyword; anothername = cp_parser_iasm_operand (parser); iasm_entry (scspec, anothername); } else { aname = cp_parser_iasm_maybe_prefix (parser, aname); iasm_in_operands = true; operands = cp_parser_iasm_operands (parser); iasm_stmt (aname, operands, input_line); } if (cp_lexer_iasm_bol (parser->lexer)) return; break; } } if (cp_lexer_iasm_bol (parser->lexer)) return; } cp_parser_iasm_maybe_skip_comments (parser); } /* Eat tokens until we get back to something we recognize. */ static void cp_parser_iasm_skip_to_next_asm (cp_parser *parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); do { if (cp_lexer_iasm_bol (parser->lexer) || token->type == CPP_SEMICOLON || token->type == CPP_CLOSE_BRACE || token->type == CPP_EOF || token->keyword == RID_ASM) return; cp_lexer_consume_token (parser->lexer); } while (1); } static tree cp_parser_iasm_operands (cp_parser *parser) { tree operands = NULL_TREE, operand; while (true) { /* If we're looking at the end of the line, then we've run out of operands. 
*/ if (cp_lexer_iasm_bol (parser->lexer) || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE) || cp_lexer_next_token_is (parser->lexer, CPP_EOF) || cp_lexer_next_token_is_keyword (parser->lexer, RID_ASM)) break; operand = cp_parser_iasm_operand (parser); if (operand && operand != error_mark_node) { operands = chainon (operands, build_tree_list (NULL_TREE, operand)); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); } else { cp_parser_iasm_skip_to_next_asm (parser); return NULL_TREE; } } return operands; } static tree cp_parser_iasm_operand (cp_parser *parser) { tree operand; /* Jump into the usual operand precedence stack. */ operand = cp_parser_binary_expression (parser, false); /* Maybe this should go up higher. */ if (BASELINK_P (operand) && TREE_CODE (BASELINK_FUNCTIONS (operand)) == FUNCTION_DECL) { operand = BASELINK_FUNCTIONS (operand); } return operand; } /* Need to handle case of relative branch using: .[+|-]number syntax */ static tree cp_parser_iasm_relative_branch (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_nth_token (parser->lexer, 2); if (token->type == CPP_PLUS || token->type == CPP_MINUS) { const char *str = (token->type == CPP_PLUS) ? ".+" : ".-"; /* consume '.' */ cp_lexer_consume_token (parser->lexer); /* consume '-' or '+' */ cp_lexer_consume_token (parser->lexer); return iasm_build_identifier_string (parser, str); } return error_mark_node; } /* Parse a CW asm-style postfix-expression. postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( expression-list [opt] ) simple-type-specifier ( expression-list [opt] ) postfix-expression . template [opt] id-expression postfix-expression -> template [opt] id-expression postfix-expression . 
pseudo-destructor-name postfix-expression -> pseudo-destructor-name typeid ( expression ) typeid ( type-id ) GNU Extension: postfix-expression: ( type-id ) { initializer-list , [opt] } This extension is a GNU version of the C99 compound-literal construct. (The C99 grammar uses `type-name' instead of `type-id', but they are essentially the same concept.) If ADDRESS_P is true, the postfix expression is the operand of the `&' operator. CAST_P is true if this expression is the target of a cast. Returns a representation of the expression. */ static tree cp_parser_iasm_postfix_expression (cp_parser *parser, bool address_p, bool cast_p) { bool for_offsetof = false; cp_token *token; enum rid keyword; cp_id_kind idk = CP_ID_KIND_NONE; tree postfix_expression = NULL_TREE; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Some of the productions are determined by keywords. */ keyword = token->keyword; switch (keyword) { case RID_SIZEOF: { tree operand; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); /* Parse the operand. */ operand = cp_parser_sizeof_operand (parser, keyword); postfix_expression = cxx_sizeof_or_alignof_type (operand, SIZEOF_EXPR, true); break; } default: { tree type; /* If the next thing is a simple-type-specifier, we may be looking at a functional cast. We could also be looking at an id-expression. So, we try the functional cast, and if that doesn't work we fall back to the primary-expression. */ cp_parser_parse_tentatively (parser); /* Look for the simple-type-specifier. */ type = cp_parser_simple_type_specifier (parser, CP_PARSER_FLAGS_NONE, /*identifier_p=*/false); /* Parse the cast itself. */ if (!cp_parser_error_occurred (parser)) postfix_expression = cp_parser_functional_cast (parser, type); /* If that worked, we're done. 
*/ if (cp_parser_parse_definitely (parser)) break; if (token->type == CPP_DOT || token->type == CPP_MULT) { postfix_expression = cp_parser_iasm_relative_branch (parser); if (postfix_expression != error_mark_node) break; } /* If the functional-cast didn't work out, try a compound-literal. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { VEC(constructor_elt,gc) *initializer_list = NULL; bool saved_in_type_id_in_expr_p; cp_parser_parse_tentatively (parser); /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Parse the type. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); /* Look for the `{'. */ cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"); /* If things aren't going well, there's no need to keep going. */ if (!cp_parser_error_occurred (parser)) { bool non_constant_p; /* Parse the initializer-list. */ initializer_list = cp_parser_initializer_list (parser, &non_constant_p); /* Allow a trailing `,'. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); /* Look for the final `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'"); } /* If that worked, we're definitely looking at a compound-literal expression. */ if (cp_parser_parse_definitely (parser)) { /* Warn the user that a compound literal is not allowed in standard C++. */ if (pedantic) pedwarn ("ISO C++ forbids compound-literals"); /* Form the representation of the compound-literal. */ postfix_expression = finish_compound_literal (type, initializer_list); break; } } /* It must be a primary-expression. 
*/ postfix_expression = cp_parser_primary_expression (parser, address_p, cast_p, /*template_arg_p=*/false, &idk); } break; } /* Keep looping until the postfix-expression is complete. */ while (true) { if (idk == CP_ID_KIND_UNQUALIFIED && TREE_CODE (postfix_expression) == IDENTIFIER_NODE && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)) /* It is not a Koenig lookup function call. */ postfix_expression = unqualified_name_lookup_error (postfix_expression); if (cp_lexer_iasm_bol (parser->lexer)) return postfix_expression; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_OPEN_SQUARE: postfix_expression = cp_parser_postfix_open_square_expression (parser, postfix_expression, false); idk = CP_ID_KIND_NONE; break; case CPP_OPEN_PAREN: /* postfix-expression ( expression ) */ { tree expr; cp_lexer_consume_token (parser->lexer); expr = cp_parser_binary_expression (parser, false); if (expr == error_mark_node) { postfix_expression = error_mark_node; break; } postfix_expression = iasm_build_register_offset (postfix_expression, expr); cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); /* The POSTFIX_EXPRESSION is certainly no longer an id. */ idk = CP_ID_KIND_NONE; } break; case CPP_DOT: case CPP_DEREF: /* postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name */ { tree name; bool dependent_p; bool template_p; tree scope = NULL_TREE; enum cpp_ttype token_type = token->type; /* We allow [eax].16 to refer to [eax + 16]. */ if (TREE_CODE (postfix_expression) == BRACKET_EXPR) { cp_token *new_token; new_token = cp_lexer_peek_nth_token (parser->lexer, 2); if (new_token->type == CPP_NUMBER) { /* Consume the `.' or `->' operator. 
*/ cp_lexer_consume_token (parser->lexer); postfix_expression = iasm_build_bracket (postfix_expression, new_token->u.value); cp_lexer_consume_token (parser->lexer); break; } } /* If this is a `->' operator, dereference the pointer. */ if (token->type == CPP_DEREF) postfix_expression = build_x_arrow (postfix_expression); /* Check to see whether or not the expression is type-dependent. */ dependent_p = type_dependent_expression_p (postfix_expression); /* The identifier following the `->' or `.' is not qualified. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; idk = CP_ID_KIND_NONE; /* Enter the scope corresponding to the type of the object given by the POSTFIX_EXPRESSION. */ if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE) { scope = TREE_TYPE (postfix_expression); /* According to the standard, no expression should ever have reference type. Unfortunately, we do not currently match the standard in this respect in that our internal representation of an expression may have reference type even when the standard says it does not. Therefore, we have to manually obtain the underlying type here. */ scope = non_reference (scope); /* The type of the POSTFIX_EXPRESSION must be complete. */ scope = complete_type_or_else (scope, NULL_TREE); /* Let the name lookup machinery know that we are processing a class member access expression. */ parser->context->object_type = scope; /* If something went wrong, we want to be able to discern that case, as opposed to the case where there was no SCOPE due to the type of expression being dependent. */ if (!scope) scope = error_mark_node; /* If the SCOPE was erroneous, make the various semantic analysis functions exit quickly -- and without issuing additional error messages. */ if (scope == error_mark_node) postfix_expression = error_mark_node; } /* Consume the `.' or `->' operator. 
*/ cp_lexer_consume_token (parser->lexer); /* If the SCOPE is not a scalar type, we are looking at an ordinary class member access expression, rather than a pseudo-destructor-name. */ if (!scope || !SCALAR_TYPE_P (scope)) { template_p = cp_parser_optional_template_keyword (parser); /* Parse the id-expression. */ name = cp_parser_id_expression (parser, template_p, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); /* In general, build a SCOPE_REF if the member name is qualified. However, if the name was not dependent and has already been resolved; there is no need to build the SCOPE_REF. For example; struct X { void f(); }; template <typename T> void f(T* t) { t->X::f(); } Even though "t" is dependent, "X::f" is not and has been resolved to a BASELINK; there is no need to include scope information. */ /* But we do need to remember that there was an explicit scope for virtual function calls. */ if (parser->scope) idk = CP_ID_KIND_QUALIFIED; if (name != error_mark_node && !BASELINK_P (name) && parser->scope) { name = build_nt (SCOPE_REF, parser->scope, name); parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; } if (scope && name && BASELINK_P (name)) adjust_result_of_qualified_name_lookup (name, BINFO_TYPE (BASELINK_BINFO (name)), scope); postfix_expression = iasm_cp_build_component_ref (postfix_expression, name); } /* Otherwise, try the pseudo-destructor-name production. */ else { tree s = NULL_TREE; tree type; /* Parse the pseudo-destructor-name. */ cp_parser_pseudo_destructor_name (parser, &s, &type); /* Form the call. */ postfix_expression = finish_pseudo_destructor_expr (postfix_expression, s, TREE_TYPE (type)); } /* We no longer need to look up names in the scope of the object on the left-hand side of the `.' or `->' operator. */ parser->context->object_type = NULL_TREE; /* Outside of offsetof, these operators may not appear in constant-expressions. 
*/ if (!for_offsetof && (cp_parser_non_integral_constant_expression (parser, token_type == CPP_DEREF ? "'->'" : "`.'"))) postfix_expression = error_mark_node; } break; case CPP_NAME: if (strcasecmp (IDENTIFIER_POINTER (token->u.value), "ptr") == 0) { /* Handle things like: inc dword ptr [eax] */ tree type = postfix_expression; cp_lexer_consume_token (parser->lexer); postfix_expression = cp_parser_iasm_postfix_expression (parser, address_p, cast_p); postfix_expression = iasm_ptr_conv (type, postfix_expression); } default: return postfix_expression; } } /* We should never get here. */ abort (); return error_mark_node; } int iasm_typename_or_reserved (tree value) { tree type_decl; if (IASM_SEE_OPCODE (TYPESPEC, value) == IDENTIFIER) return 0; if (C_IS_RESERVED_WORD (value)) return 1; type_decl = lookup_name_real (value, 0, 0, true, 0, 0); return type_decl && (TREE_CODE (type_decl) == TYPE_DECL || TREE_CODE (type_decl) == NAMESPACE_DECL || TREE_CODE (type_decl) == TEMPLATE_DECL); } /* APPLE LOCAL end CW asm blocks */ /* Parse an Objective-C expression, which feeds into a primary-expression above. objc-expression: objc-message-expression objc-string-literal objc-encode-expression objc-protocol-expression objc-selector-expression Returns a tree representation of the expression. */ static tree cp_parser_objc_expression (cp_parser* parser) { /* Try to figure out what kind of declaration is present. 
*/ cp_token *kwd = cp_lexer_peek_token (parser->lexer); switch (kwd->type) { case CPP_OPEN_SQUARE: return cp_parser_objc_message_expression (parser); case CPP_OBJC_STRING: kwd = cp_lexer_consume_token (parser->lexer); return objc_build_string_object (kwd->u.value); case CPP_KEYWORD: switch (kwd->keyword) { case RID_AT_ENCODE: return cp_parser_objc_encode_expression (parser); case RID_AT_PROTOCOL: return cp_parser_objc_protocol_expression (parser); case RID_AT_SELECTOR: return cp_parser_objc_selector_expression (parser); default: break; } default: error ("misplaced %<@%D%> Objective-C++ construct", kwd->u.value); cp_parser_skip_to_end_of_block_or_statement (parser); } return error_mark_node; } /* Parse an Objective-C message expression. objc-message-expression: [ objc-message-receiver objc-message-args ] Returns a representation of an Objective-C message. */ static tree cp_parser_objc_message_expression (cp_parser* parser) { tree receiver, messageargs; cp_lexer_consume_token (parser->lexer); /* Eat '['. */ receiver = cp_parser_objc_message_receiver (parser); messageargs = cp_parser_objc_message_args (parser); cp_parser_require (parser, CPP_CLOSE_SQUARE, "`]'"); return objc_build_message_expr (build_tree_list (receiver, messageargs)); } /* APPLE LOCAL begin radar 5277239 */ /* Parse an Objective-C dot-syntax class expression. objc-message-expression: class-name '.' class-method-name Returns an objc_property_reference expression. */ static tree cp_parser_objc_reference_expression (cp_parser* parser, tree type_decl) { tree receiver, component; receiver = objc_get_class_reference (TREE_TYPE (type_decl)); cp_lexer_consume_token (parser->lexer); /* Eact '.' */ component = cp_parser_objc_message_args (parser); return objc_build_property_reference_expr (receiver, TREE_PURPOSE (component)); } /* APPLE LOCAL end radar 5277239 */ /* Parse an objc-message-receiver. objc-message-receiver: expression simple-type-specifier Returns a representation of the type or expression. 
*/ static tree cp_parser_objc_message_receiver (cp_parser* parser) { tree rcv; /* An Objective-C message receiver may be either (1) a type or (2) an expression. */ cp_parser_parse_tentatively (parser); rcv = cp_parser_expression (parser, false); if (cp_parser_parse_definitely (parser)) return rcv; rcv = cp_parser_simple_type_specifier (parser, /*decl_specs=*/NULL, CP_PARSER_FLAGS_NONE); return objc_get_class_reference (rcv); } /* Parse the arguments and selectors comprising an Objective-C message. objc-message-args: objc-selector objc-selector-args objc-selector-args , objc-comma-args objc-selector-args: objc-selector [opt] : assignment-expression objc-selector-args objc-selector [opt] : assignment-expression objc-comma-args: assignment-expression objc-comma-args , assignment-expression Returns a TREE_LIST, with TREE_PURPOSE containing a list of selector arguments and TREE_VALUE containing a list of comma arguments. */ static tree cp_parser_objc_message_args (cp_parser* parser) { tree sel_args = NULL_TREE, addl_args = NULL_TREE; bool maybe_unary_selector_p = true; cp_token *token = cp_lexer_peek_token (parser->lexer); while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON) { tree selector = NULL_TREE, arg; if (token->type != CPP_COLON) selector = cp_parser_objc_selector (parser); /* Detect if we have a unary selector. */ if (maybe_unary_selector_p && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) return build_tree_list (selector, NULL_TREE); maybe_unary_selector_p = false; cp_parser_require (parser, CPP_COLON, "`:'"); arg = cp_parser_assignment_expression (parser, false); sel_args = chainon (sel_args, build_tree_list (selector, arg)); token = cp_lexer_peek_token (parser->lexer); } /* Handle non-selector arguments, if any. 
*/ while (token->type == CPP_COMMA)
    {
      tree arg;

      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      arg = cp_parser_assignment_expression (parser, false);
      addl_args = chainon (addl_args, build_tree_list (NULL_TREE, arg));
      token = cp_lexer_peek_token (parser->lexer);
    }

  /* APPLE LOCAL begin radar 4294425 */
  /* A message send with no selector and no comma arguments at all is
     malformed.  */
  if (sel_args == NULL_TREE && addl_args == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ message argument(s) are expected");
      return build_tree_list (error_mark_node, error_mark_node);
    }
  /* APPLE LOCAL end radar 4294425 */

  return build_tree_list (sel_args, addl_args);
}

/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Returns an encoded representation of the type argument.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree type;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@encode'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  if (!type)
    {
      error ("%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  /* APPLE LOCAL begin radar 4278774 */
  /* For a dependent type, defer the encoding: build an AT_ENCODE_EXPR
     to be expanded at template instantiation time.  */
  if (dependent_type_p (type))
    {
      tree value = build_min (AT_ENCODE_EXPR, size_type_node, type);
      TREE_READONLY (value) = 1;
      return value;
    }
  /* APPLE LOCAL end radar 4278774 */

  return objc_build_encode_expr (type);
}

/* Parse an Objective-C @defs expression.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree name;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@defs'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return objc_get_class_ivars (name);
}

/* Parse an Objective-C protocol expression.

  objc-protocol-expression:
    @protocol ( identifier )

  Returns a representation of the protocol expression.
*/ static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree proto;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  proto = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return objc_build_protocol_expr (proto);
}

/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector : objc-selector-seq
     objc-selector :

   Returns a representation of the method selector.  */

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type)
	 || token->type == CPP_COLON
	 || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      /* NOTE(review): the `|| token->type == CPP_SCOPE' disjunct is
	 redundant -- when the type is CPP_SCOPE the first test is
	 already true.  Kept as-is; behavior is unchanged.  */
      if (token->type != CPP_COLON || token->type == CPP_SCOPE)
	selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
	{
	  /* Detect if we have a unary selector.  */
	  if (maybe_unary_selector_p)
	    {
	      sel_seq = selector;
	      goto finish_selector;
	    }
	  else
	    {
	      cp_parser_error (parser, "expected %<:%>");
	    }
	}
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);
      /* A `::' token stands for two colons, i.e. the current selector
	 slot plus one empty slot.  */
      if (token->type == CPP_SCOPE)
	{
	  sel_seq = chainon (sel_seq,
			     build_tree_list (selector, NULL_TREE));
	  sel_seq = chainon (sel_seq,
			     build_tree_list (NULL_TREE, NULL_TREE));
	}
      else
	sel_seq = chainon (sel_seq,
			   build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  return objc_build_selector_expr (sel_seq);
}

/* Parse a list of identifiers.
   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree list = build_tree_list (NULL_TREE, cp_parser_identifier (parser));
  cp_token *sep = cp_lexer_peek_token (parser->lexer);

  /* Keep appending identifiers while commas separate them.  */
  while (sep->type == CPP_COMMA)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      list = chainon (list,
		      build_tree_list (NULL_TREE,
				       cp_parser_identifier (parser)));
      sep = cp_lexer_peek_token (parser->lexer);
    }

  return list;
}

/* Parse an Objective-C alias declaration.

   objc-alias-declaration:
     @compatibility_alias identifier identifier ;

   This function registers the alias mapping with the Objective-C
   front-end.  It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree alias, orig;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@compatibility_alias'.  */
  alias = cp_parser_identifier (parser);
  orig = cp_parser_identifier (parser);
  objc_declare_alias (alias, orig);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   The function registers the forward declarations with the Objective-C
   front-end.  It returns nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer);  /* Eat '@class'.  */
  objc_declare_class (cp_parser_objc_identifier_list (parser));
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}

/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, if any.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  tree protorefs = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '<'.
*/ protorefs = cp_parser_objc_identifier_list (parser);
      cp_parser_require (parser, CPP_GREATER, "`>'");
    }

  return protorefs;
}

/* APPLE LOCAL begin radar 5355344 */
/* This routine also parses a list of Objective-C protocol references;
   except that if list is not valid, it returns FALSE and back-tracks
   parsing.  */

static bool
cp_parser_objc_tentative_protocol_refs_opt (cp_parser* parser, tree *protorefs)
{
  *protorefs = NULL_TREE;
  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      /* Parse tentatively so an invalid list can be undone without
	 consuming tokens.  */
      cp_parser_parse_tentatively (parser);
      cp_lexer_consume_token (parser->lexer);  /* Eat '<'.  */
      *protorefs = cp_parser_objc_identifier_list (parser);
      if (!cp_objc_protocol_id_list (*protorefs))
	{
	  cp_parser_abort_tentative_parse (parser);
	  return false;
	}
      if (cp_parser_parse_definitely (parser))
	cp_parser_require (parser, CPP_GREATER, "`>'");
    }
  return true;
}
/* APPLE LOCAL end radar 5355344 */

/* Parse a Objective-C visibility specification.  */

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  /* The magic numbers are the objc front end's visibility codes.  */
  switch (vis->keyword)
    {
    case RID_AT_PRIVATE:
      objc_set_visibility (2);
      break;
    case RID_AT_PROTECTED:
      objc_set_visibility (0);
      break;
    case RID_AT_PUBLIC:
      objc_set_visibility (1);
      break;
    /* APPLE LOCAL begin radar 4564694 */
    case RID_AT_PACKAGE:
      objc_set_visibility (3);
      break;
    /* APPLE LOCAL end radar 4564694 */
    default:
      return;
    }

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}

/* Parse an Objective-C method type.  */

static void
cp_parser_objc_method_type (cp_parser* parser)
{
  /* '+' introduces a class method, '-' an instance method.  */
  objc_set_method_type (cp_lexer_consume_token (parser->lexer)->type
			== CPP_PLUS
			? PLUS_EXPR
			: MINUS_EXPR);
}

/* Parse an Objective-C protocol qualifier.
*/

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  /* Accumulate any of the qualifier keywords 'in', 'out', 'inout',
     'bycopy', 'byref' and 'oneway' into a TREE_LIST.  */
  tree quals = NULL_TREE, node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  node = token->u.value;

  while (node && TREE_CODE (node) == IDENTIFIER_NODE
	 && (node == ridpointers [(int) RID_IN]
	     || node == ridpointers [(int) RID_OUT]
	     || node == ridpointers [(int) RID_INOUT]
	     || node == ridpointers [(int) RID_BYCOPY]
	     || node == ridpointers [(int) RID_BYREF]
	     || node == ridpointers [(int) RID_ONEWAY]))
    {
      quals = tree_cons (NULL_TREE, node, quals);
      cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      node = token->u.value;
    }

  return quals;
}

/* Parse an Objective-C typename, i.e. an optional parenthesized
   '( objc-protocol-qualifiers [opt] type-id [opt] )'.  Returns a
   TREE_LIST whose TREE_PURPOSE is the qualifier list and whose
   TREE_VALUE is the type (NULL_TREE if only qualifiers were given, in
   which case the type defaults to 'id'), or NULL_TREE when no '(' is
   present.  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree typename = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree proto_quals, cp_type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      proto_quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers, in
	 which case the type shall default to 'id'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	cp_type = cp_parser_type_id (parser);

      /* APPLE LOCAL begin radar 6261630 */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	{
	  /* Chain on the trailing attribute.  */
	  tree attrs = chainon (NULL_TREE, cp_parser_attributes_opt (parser));
	  cplus_decl_attributes (&cp_type, attrs, 0);
	}
      /* APPLE LOCAL end radar 6261630 */

      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      typename = build_tree_list (proto_quals, cp_type);
    }

  return typename;
}

/* Check to see if TYPE refers to an Objective-C selector name.  */

static bool
cp_parser_objc_selector_p (enum cpp_ttype type)
{
  /* Plain names and keywords qualify, as do the C++ alternative
     operator tokens, which are mapped to selector identifiers below.  */
  return (type == CPP_NAME || type == CPP_KEYWORD
	  || type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND
	  || type == CPP_OR || type == CPP_COMPL || type == CPP_NOT
	  || type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ
	  || type == CPP_XOR || type == CPP_XOR_EQ);
}

/* Parse an Objective-C selector.  Returns the selector identifier, or
   error_mark_node if the next token cannot be a selector.  */

static tree
cp_parser_objc_selector (cp_parser* parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_objc_selector_p (token->type))
    {
      error ("invalid Objective-C++ selector name");
      return error_mark_node;
    }

  /* C++ operator names are allowed to appear in ObjC selectors.  */
  switch (token->type)
    {
    case CPP_AND_AND: return get_identifier ("and");
    case CPP_AND_EQ: return get_identifier ("and_eq");
    case CPP_AND: return get_identifier ("bitand");
    case CPP_OR: return get_identifier ("bitor");
    case CPP_COMPL: return get_identifier ("compl");
    case CPP_NOT: return get_identifier ("not");
    case CPP_NOT_EQ: return get_identifier ("not_eq");
    case CPP_OR_OR: return get_identifier ("or");
    case CPP_OR_EQ: return get_identifier ("or_eq");
    case CPP_XOR: return get_identifier ("xor");
    case CPP_XOR_EQ: return get_identifier ("xor_eq");
    default: return token->u.value;
    }
}

/* APPLE LOCAL begin radar 3803157 - objc attribute */
/* If the next token is '__attribute__', parse the attribute list into
   *ATTRIBUTES.  Diagnoses (and discards) an attribute list that was
   already collected earlier in the declaration, since method attributes
   may only appear at the end.  */

static void
cp_parser_objc_maybe_attributes (cp_parser* parser, tree* attributes)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (*attributes != NULL_TREE)
    {
      error ("method attributes must be specified at the end only");
      *attributes = NULL_TREE;
    }

  if (token->keyword == RID_ATTRIBUTE)
    *attributes = cp_parser_attributes_opt (parser);
}

/* Parse an Objective-C params list.
*/

static tree
cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes)
/* APPLE LOCAL end radar 3803157 - objc attribute */
{
  tree params = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, typename, identifier;
      /* APPLE LOCAL radar 4157812 */
      tree attr = NULL_TREE;

      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector (a selector name with no
	 ':' and hence no parameters).  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	/* APPLE LOCAL begin radar 3803157 - objc attribute */
	{
	  cp_parser_objc_maybe_attributes (parser, attributes);
	  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	    return selector;
	}
	/* APPLE LOCAL end radar 3803157 - objc attribute */

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, "`:'");
      typename = cp_parser_objc_typename (parser);
      /* APPLE LOCAL radar 4157812 */
      cp_parser_objc_maybe_attributes (parser, &attr);
      identifier = cp_parser_identifier (parser);
      /* APPLE LOCAL radar 3803157 - objc attribute */
      cp_parser_objc_maybe_attributes (parser, attributes);

      params
	= chainon (params,
		   objc_build_keyword_decl (selector, typename,
					    /* APPLE LOCAL radar 4157812 */
					    identifier, attr));

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* APPLE LOCAL begin radar 4290840 */
  if (params == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ method declaration is expected");
      return error_mark_node;
    }
  /* APPLE LOCAL end radar 4290840 */

  return params;
}

/* Parse the non-keyword Objective-C params (the comma-separated extra
   parameters after the keyword arguments, possibly ending in '...').
   *ELLIPSISP is set to true iff an ellipsis was seen.  */

static tree
/* APPLE LOCAL radar 3803157 - objc attribute */
cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp,
				       tree* attributes)
{
  tree params = make_node (TREE_LIST);
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  *ellipsisp = false;  /* Initially, assume no ellipsis.  */

  while (token->type == CPP_COMMA)
    {
      cp_parameter_declarator *parmdecl;
      tree parm;

      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_ELLIPSIS)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat '...'.  */
	  *ellipsisp = true;
	  /* APPLE LOCAL radar 3803157 - objc attribute */
	  cp_parser_objc_maybe_attributes (parser, attributes);
	  break;
	}

      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
			     &parmdecl->decl_specifiers,
			     PARM, /*initialized=*/0,
			     /*attrlist=*/NULL);

      chainon (params, build_tree_list (NULL_TREE, parm));
      token = cp_lexer_peek_token (parser->lexer);
    }

  return params;
}

/* Parse a linkage specification, a pragma, an extra semicolon or a block.  */

static void
cp_parser_objc_interstitial_code (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token->keyword == RID_EXTERN
      && cp_parser_is_string_literal (cp_lexer_peek_nth_token (parser->lexer, 2)))
    cp_parser_linkage_specification (parser);
  /* Handle #pragma, if any.  */
  else if (token->type == CPP_PRAGMA)
    cp_parser_pragma (parser, pragma_external);
  /* Allow stray semicolons.  */
  else if (token->type == CPP_SEMICOLON)
    cp_lexer_consume_token (parser->lexer);
  /* APPLE LOCAL begin C* language */
  else if (token->keyword == RID_AT_OPTIONAL)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (1);
    }
  else if (token->keyword == RID_AT_REQUIRED)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (0);
    }
  /* APPLE LOCAL end C* language */
  /* APPLE LOCAL begin radar 4508851 */
  else if (token->keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* APPLE LOCAL end radar 4508851 */
  /* APPLE LOCAL begin 4093475 */
  /* Other stray characters must generate errors.  */
  else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE)
    {
      cp_lexer_consume_token (parser->lexer);
      error ("stray %<%s%> between Objective-C++ methods",
	     token->type == CPP_OPEN_BRACE ? "{" : "}");
    }
  /* APPLE LOCAL end 4093475 */
  /* APPLE LOCAL begin radar 5976344 */
  else if (token->keyword == RID_TEMPLATE)
    cp_parser_declaration (parser);
  /* APPLE LOCAL end radar 5976344 */
  /* Finally, try to parse a block-declaration, or a function-definition.  */
  else
    cp_parser_block_declaration (parser, /*statement_p=*/false);
}

/* Parse a method signature: the '+'/'-' method type, the return
   typename, the keyword parameters and any trailing parameters.  */

static tree
/* APPLE LOCAL radar 3803157 - objc attribute */
cp_parser_objc_method_signature (cp_parser* parser, tree* attributes)
{
  tree rettype, kwdparms, optparms;
  bool ellipsis = false;

  cp_parser_objc_method_type (parser);
  rettype = cp_parser_objc_typename (parser);
  /* APPLE LOCAL begin radar 3803157 - objc attribute */
  *attributes = NULL_TREE;
  kwdparms = cp_parser_objc_method_keyword_params (parser, attributes);
  optparms = cp_parser_objc_method_tail_params_opt (parser, &ellipsis, attributes);
  /* APPLE LOCAL end radar 3803157 - objc attribute */

  return objc_build_method_signature (rettype, kwdparms, optparms, ellipsis);
}

/* APPLE LOCAL Pars --> Parse */
/* Parse an Objective-C method prototype list.
*/

static void
cp_parser_objc_method_prototype_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* APPLE LOCAL 4093475 */
  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  /* APPLE LOCAL begin radar 3803157 - objc attribute */
	  tree attributes, sig;
	  sig = cp_parser_objc_method_signature (parser, &attributes);
	  objc_add_method_declaration (sig, attributes);
	  /* APPLE LOCAL end radar 3803157 - objc attribute */
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	}
      /* APPLE LOCAL begin C* interface */
      else if (token->keyword == RID_AT_PROPERTY)
	objc_cp_parser_at_property (parser);
      /* APPLE LOCAL end C* interface */
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* APPLE LOCAL 4093475 */
  cp_parser_require_keyword (parser, RID_AT_END, "`@end'");
  objc_finish_interface ();
}

/* Parse an Objective-C method definition list.  */

static void
cp_parser_objc_method_definition_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* APPLE LOCAL 4093475 */
  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      tree meth;

      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  /* APPLE LOCAL radar 4290840 */
	  cp_token *ptk;
	  /* APPLE LOCAL begin radar 3803157 - objc attribute */
	  tree sig, attribute;
	  push_deferring_access_checks (dk_deferred);
	  sig = cp_parser_objc_method_signature (parser, &attribute);
	  objc_start_method_definition (sig, attribute);
	  /* APPLE LOCAL end radar 3803157 - objc attribute */

	  /* For historical reasons, we accept an optional semicolon.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);

	  /* APPLE LOCAL begin radar 4290840 */
	  /* Check for all possibilities of illegal lookahead tokens.  */
	  ptk = cp_lexer_peek_token (parser->lexer);
	  /* APPLE LOCAL radar 6271728 */
	  if (ptk->type == CPP_OPEN_BRACE)
	    {
	      perform_deferred_access_checks ();
	      stop_deferring_access_checks ();
	      meth = cp_parser_function_definition_after_declarator (parser, false);
	      pop_deferring_access_checks ();
	      objc_finish_method_definition (meth);
	    }
	  /* APPLE LOCAL begin radar 6271728 */
	  else
	    cp_parser_require (parser, CPP_OPEN_BRACE, "`{'");
	  /* APPLE LOCAL end radar 6271728 */
	  /* APPLE LOCAL end radar 4290840 */
	}
      /* APPLE LOCAL begin C* interface */
      else if (token->keyword == RID_AT_PROPERTY)
	objc_cp_parser_at_property (parser);
      /* APPLE LOCAL end C* interface */
      /* APPLE LOCAL begin objc new property */
      else if (token->keyword == RID_AT_SYNTHESIZE
	       || token->keyword == RID_AT_DYNAMIC)
	objc_cp_parser_property_impl (parser, token->keyword);
      /* APPLE LOCAL end objc new property */
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* APPLE LOCAL 4093475 */
  cp_parser_require_keyword (parser, RID_AT_END, "`@end'");
  objc_finish_implementation ();
}

/* Parse Objective-C ivars (the brace-enclosed instance-variable
   declarations of a class interface or implementation).  */

static void
cp_parser_objc_class_ivars (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type != CPP_OPEN_BRACE)
    return;	/* No ivars specified.  */

  cp_lexer_consume_token (parser->lexer);  /* Eat '{'.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* APPLE LOCAL begin radar 4261146 */
  while (token->type != CPP_CLOSE_BRACE && token->keyword != RID_AT_END
	 && token->type != CPP_EOF)
  /* APPLE LOCAL end radar 4261146 */
    {
      cp_decl_specifier_seq declspecs;
      int decl_class_or_enum_p;
      tree prefix_attributes;

      cp_parser_objc_visibility_spec (parser);

      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	break;

      cp_parser_decl_specifier_seq (parser,
				    CP_PARSER_FLAGS_OPTIONAL,
				    &declspecs,
				    &decl_class_or_enum_p);
      /* APPLE LOCAL begin radar 4360010 */
      if (declspecs.storage_class == sc_static)
	{
	  error ("storage class specified for ivar");
	  /* recover */
	  declspecs.storage_class = sc_none;
	}
      /* APPLE LOCAL end radar 4360010 */
      /* APPLE LOCAL begin radar 4652027 */
      else if (declspecs.specs[(int) ds_typedef])
	{
	  error ("typedef declaration among ivars");
	  cp_lexer_consume_token (parser->lexer);
	  /* recover */
	}
      /* APPLE LOCAL end radar 4652027 */
      prefix_attributes = declspecs.attributes;
      declspecs.attributes = NULL_TREE;

      /* Keep going until we hit the `;' at the end of the declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree width = NULL_TREE, attributes, first_attribute, decl;
	  cp_declarator *declarator = NULL;
	  int ctor_dtor_or_conv_p;

	  /* Check for a (possibly unnamed) bitfield declaration.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_COLON)
	    goto eat_colon;

	  if (token->type == CPP_NAME
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      /* Get the name of the bitfield.  */
	      declarator = make_id_declarator (NULL_TREE,
					       cp_parser_identifier (parser),
					       sfk_none);

	     eat_colon:
	      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser,
						 /*allow_non_constant=*/false,
						 NULL);
	    }
	  else
	    {
	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/false);
	    }

	  /* Look for attributes that apply to the ivar.  */
	  attributes = cp_parser_attributes_opt (parser);
	  /* Remember which attributes are prefix attributes and
	     which are not.  */
	  first_attribute = attributes;
	  /* Combine the attributes.  */
	  attributes = chainon (prefix_attributes, attributes);

	  if (width)
	    {
	      /* Create the bitfield declaration.  */
	      decl = grokbitfield (declarator, &declspecs, width);
	      cplus_decl_attributes (&decl, attributes, /*flags=*/0);
	    }
	  else
	    decl = grokfield (declarator, &declspecs,
			      NULL_TREE, /*init_const_expr_p=*/false,
			      NULL_TREE, attributes);

	  /* Add the instance variable.  */
	  objc_add_instance_variable (decl);

	  /* Reset PREFIX_ATTRIBUTES so it does not carry the
	     non-prefix attributes of this declarator.  */
	  while (attributes && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;

	  token = cp_lexer_peek_token (parser->lexer);

	  if (token->type == CPP_COMMA)
	    {
	      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	      continue;
	    }
	  break;
	}

      cp_parser_consume_semicolon_at_end_of_statement (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }

  cp_lexer_consume_token (parser->lexer);  /* Eat '}'.  */
  /* For historical reasons, we accept an optional semicolon.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}

/* Parse an Objective-C protocol declaration (either a forward
   declaration '@protocol A, B;' or a full definition).  */

static void
/* APPLE LOCAL radar 4947311 */
cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes)
{
  tree proto, protorefs;
  cp_token *tok;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      error ("identifier expected after %<@protocol%>");
      goto finish;
    }

  /* See if we have a forward declaration or a definition.  */
  tok = cp_lexer_peek_nth_token (parser->lexer, 2);

  /* Try a forward declaration first.  */
  if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON)
    {
      /* APPLE LOCAL radar 4947311 */
      objc_declare_protocols (cp_parser_objc_identifier_list (parser), attributes);
     finish:
      cp_parser_consume_semicolon_at_end_of_statement (parser);
    }
  /* Ok, we got a full-fledged definition (or at least should).  */
  else
    {
      proto = cp_parser_identifier (parser);
      protorefs = cp_parser_objc_protocol_refs_opt (parser);
      /* APPLE LOCAL radar 4947311 */
      objc_start_protocol (proto, protorefs, attributes);
      cp_parser_objc_method_prototype_list (parser);
    }
}

/* Parse an Objective-C superclass (': identifier') or category
   ('( identifier [opt] )') clause.  *IS_CATEGORY is set iff a category
   was seen; an anonymous category leaves *CATEG as NULL_TREE.  */

/* APPLE LOCAL begin radar 4965989 */
static void
cp_parser_objc_superclass_or_category (cp_parser *parser, tree *super,
				       tree *categ, bool *is_category)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  *super = *categ = NULL_TREE;
  *is_category = false;
  if (next->type == CPP_COLON)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
      *super = cp_parser_identifier (parser);
    }
  else if (next->type == CPP_OPEN_PAREN)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      /* APPLE LOCAL begin radar 4965989 */
      next = cp_lexer_peek_token (parser->lexer);
      *categ = (next->type == CPP_CLOSE_PAREN) ? NULL_TREE
					       : cp_parser_identifier (parser);
      *is_category = true;
      /* APPLE LOCAL end radar 4965989 */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
    }
}
/* APPLE LOCAL end radar 4965989 */

/* Parse an Objective-C class interface (after '@interface').  */

static void
/* APPLE LOCAL radar 4947311 */
cp_parser_objc_class_interface (cp_parser* parser, tree attributes)
{
  tree name, super, categ, protos;
  /* APPLE LOCAL radar 4965989 */
  bool is_categ;

  /* APPLE LOCAL radar 4947311 */
  /* Code for radar 4548636 removed.  */
  cp_lexer_consume_token (parser->lexer);  /* Eat '@interface'.  */
  name = cp_parser_identifier (parser);
  /* APPLE LOCAL radar 4965989 */
  cp_parser_objc_superclass_or_category (parser, &super, &categ, &is_categ);
  protos = cp_parser_objc_protocol_refs_opt (parser);

  /* We have either a class or a category on our hands.  */
  /* APPLE LOCAL radar 4965989 */
  if (is_categ)
    /* APPLE LOCAL begin radar 4548636 */
    {
      if (attributes)
	error ("attributes may not be specified on a category");
      objc_start_category_interface (name, categ, protos);
    }
    /* APPLE LOCAL end radar 4548636 */
  else
    {
      /* APPLE LOCAL radar 4548636 */
      objc_start_class_interface (name, super, protos, attributes);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_interface ();
    }

  cp_parser_objc_method_prototype_list (parser);
}

/* Parse an Objective-C class implementation (after '@implementation').  */

static void
cp_parser_objc_class_implementation (cp_parser* parser)
{
  tree name, super, categ;
  /* APPLE LOCAL radar 4965989 */
  bool is_categ;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@implementation'.  */
  name = cp_parser_identifier (parser);
  /* APPLE LOCAL radar 4965989 */
  cp_parser_objc_superclass_or_category (parser, &super, &categ, &is_categ);

  /* We have either a class or a category on our hands.  */
  /* APPLE LOCAL begin radar 4965989 */
  if (is_categ)
    {
      if (categ == NULL_TREE)
	{
	  error ("cannot implement anonymous category");
	  return;
	}
      objc_start_category_implementation (name, categ);
    }
  /* APPLE LOCAL end radar 4965989 */
  else
    {
      objc_start_class_implementation (name, super);
      /* Handle instance variable declarations, if any.  */
      cp_parser_objc_class_ivars (parser);
      objc_continue_implementation ();
    }

  cp_parser_objc_method_definition_list (parser);
}

/* Consume the @end token and finish off the implementation.  */

static void
cp_parser_objc_end_implementation (cp_parser* parser)
{
  cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  objc_finish_implementation ();
}

/* Parse an Objective-C declaration.
*/

static void
cp_parser_objc_declaration (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_ALIAS:
      cp_parser_objc_alias_declaration (parser);
      break;
    case RID_AT_CLASS:
      cp_parser_objc_class_declaration (parser);
      break;
    case RID_AT_PROTOCOL:
      /* APPLE LOCAL radar 4947311 */
      cp_parser_objc_protocol_declaration (parser, NULL_TREE);
      break;
    /* APPLE LOCAL begin radar 4548636 - radar 4947311 */
    case RID_ATTRIBUTE:
      {
	/* A prefix attribute list; it may only precede an @interface
	   or a @protocol construct.  */
	tree attributes = NULL_TREE;
	cp_parser_objc_maybe_attributes (parser, &attributes);
	if (cp_lexer_peek_token (parser->lexer)->keyword == RID_AT_INTERFACE)
	  cp_parser_objc_class_interface (parser, attributes);
	else if (cp_lexer_peek_token (parser->lexer)->keyword == RID_AT_PROTOCOL)
	  cp_parser_objc_protocol_declaration (parser, attributes);
	break;
      }
    /* APPLE LOCAL end radar 4548636 - radar 4947311 */
    case RID_AT_INTERFACE:
      /* APPLE LOCAL radar 4947311 */
      cp_parser_objc_class_interface (parser, NULL_TREE);
      break;
    case RID_AT_IMPLEMENTATION:
      cp_parser_objc_class_implementation (parser);
      break;
    case RID_AT_END:
      cp_parser_objc_end_implementation (parser);
      break;
    default:
      error ("misplaced %<@%D%> Objective-C++ construct", kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }
}

/* Parse an Objective-C try-catch-finally statement.

     objc-try-catch-finally-stmt:
       @try compound-statement objc-catch-clause-seq [opt]
	 objc-finally-clause [opt]

     objc-catch-clause-seq:
       objc-catch-clause objc-catch-clause-seq [opt]

     objc-catch-clause:
       @catch ( exception-declaration ) compound-statement

     objc-finally-clause
       @finally compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser)
{
  location_t location;
  tree stmt;

  cp_parser_require_keyword (parser, RID_AT_TRY, "`@try'");
  location = cp_lexer_peek_token (parser->lexer)->location;
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block.  */
  stmt = push_stmt_list ();
  /* APPLE LOCAL radar 5982990 */
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));

  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parmdecl;
      tree parm;
      /* APPLE LOCAL radar 2848255 */
      bool ellipsis_seen = false;

      cp_lexer_consume_token (parser->lexer);
      cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
      /* APPLE LOCAL begin radar 2848255 */
      /* APPLE LOCAL begin radar 4995967 */
      {
	cp_token *token = cp_lexer_peek_token (parser->lexer);
	if (token->type == CPP_ELLIPSIS)
	  {
	    /* @catch (...) */
	    parm = NULL_TREE;
	    cp_lexer_consume_token (parser->lexer);
	    ellipsis_seen = true;
	  }
      }
      /* APPLE LOCAL end radar 4995967 */
      if (!ellipsis_seen)
	{
	  parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
	  parm = grokdeclarator (parmdecl->declarator,
				 &parmdecl->decl_specifiers,
				 PARM, /*initialized=*/0,
				 /*attrlist=*/NULL);
	}
      /* APPLE LOCAL end radar 2848255 */
      cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");
      objc_begin_catch_clause (parm);
      /* APPLE LOCAL radar 5982990 */
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }

  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own
	 STATEMENT_LIST node, lest it get absorbed into the surrounding
	 block.  */
      stmt = push_stmt_list ();
      /* APPLE LOCAL radar 5982990 */
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }

  return objc_finish_try_stmt ();
}

/* Parse an Objective-C synchronized statement.

     objc-synchronized-stmt:
       @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  location_t location;
  tree lock, stmt;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, "`@synchronized'");

  location = cp_lexer_peek_token (parser->lexer)->location;
  cp_parser_require (parser, CPP_OPEN_PAREN, "`('");
  lock = cp_parser_expression (parser, false);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  /* NB: The @synchronized block needs to be wrapped in its own
     STATEMENT_LIST node, lest it get absorbed into the surrounding
     block.  */
  stmt = push_stmt_list ();
  /* APPLE LOCAL radar 5982990 */
  cp_parser_compound_statement (parser, NULL, false, flag_objc_sjlj_exceptions);

  return objc_build_synchronized (location, lock, pop_stmt_list (stmt));
}

/* Parse an Objective-C throw statement.

     objc-throw-stmt:
       @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.  */

static tree
cp_parser_objc_throw_statement (cp_parser *parser)
{
  tree expr = NULL_TREE;

  cp_parser_require_keyword (parser, RID_AT_THROW, "`@throw'");

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    expr = cp_parser_assignment_expression (parser, false);

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  return objc_build_throw_stmt (expr);
}

/* Parse an Objective-C statement ('@try', '@synchronized' or
   '@throw').  */

static tree
cp_parser_objc_statement (cp_parser * parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_TRY:
      return cp_parser_objc_try_catch_finally_statement (parser);
    case RID_AT_SYNCHRONIZED:
      return cp_parser_objc_synchronized_statement (parser);
    case RID_AT_THROW:
      return cp_parser_objc_throw_statement (parser);
    default:
      error ("misplaced %<@%D%> Objective-C++ construct", kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}

/* APPLE LOCAL begin C* language */
/* Routine closes up the C*'s foreach statement.  */

static void
objc_finish_foreach_stmt (tree for_stmt)
{
  if (flag_new_for_scope > 0)
    {
      tree scope = TREE_CHAIN (for_stmt);
      TREE_CHAIN (for_stmt) = NULL;
      add_stmt (do_poplevel (scope));
    }

  finish_stmt ();
}

/* Synthesizer routine for C*'s foreach statement.

   It synthesizes:

   for ( type elem in collection) { stmts; }

   Into:

    {
    type elem;
    __objcFastEnumerationState enumState = { 0 };
    id items[16];

    unsigned long limit = [collection countByEnumeratingWithState:&enumState objects:items count:16];
    if (limit) {
      unsigned long startMutations = *enumState.mutationsPtr;
      do {
	unsigned long counter = 0;
	do {
	  if (startMutations != *enumState.mutationsPtr)
	    objc_enumerationMutation(collection);
	  elem = enumState.itemsPtr[counter++];
	  stmts;
	} while (counter < limit);
      } while (limit = [collection countByEnumeratingWithState:&enumState objects:items count:16]);
    }
    else
      elem = nil;

   radar 4854605, 5128402  */

static void
objc_foreach_stmt (cp_parser* parser, tree statement)
{
  unsigned char in_statement;
  tree enumerationMutation_call_exp;
  tree countByEnumeratingWithState;
  tree receiver;
  tree exp, bind;
  tree enumState_decl, items_decl;
  tree limit_decl, limit_decl_assign_expr;
  tree outer_if_stmt, inner_if_stmt, if_condition, startMutations_decl;
  tree outer_do_stmt, inner_do_stmt, do_condition;
  tree counter_decl;
  tree_stmt_iterator i = tsi_start (TREE_CHAIN (statement));
  tree t = tsi_stmt (i);
  /* APPLE LOCAL radar 5130983 */
  tree elem_decl = TREE_CODE (t) == DECL_EXPR ? DECL_EXPR_DECL (t) : t;

  receiver = cp_parser_condition (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'");

  /* APPLE LOCAL begin radar 5130983 */
  if (elem_decl == error_mark_node)
    return;

  if (!lvalue_or_else (&elem_decl, lv_foreach))
    return;
  /* APPLE LOCAL end radar 5130983 */

  /* APPLE LOCAL begin radar 4507230 */
  if (!objc_type_valid_for_messaging (TREE_TYPE (elem_decl)))
    {
      error ("selector element does not have a valid object type");
      return;
    }

  if (!objc_type_valid_for_messaging (TREE_TYPE (receiver)))
    {
      error ("expression does not have a valid object type");
      return;
    }
  /* APPLE LOCAL end radar 4507230 */

  enumerationMutation_call_exp
    = objc_build_foreach_components (receiver, &enumState_decl,
				     &items_decl, &limit_decl,
				     &startMutations_decl, &counter_decl,
				     &countByEnumeratingWithState);

  /* __objcFastEnumerationState enumState = { 0 }; */
  exp = build_stmt (DECL_EXPR, enumState_decl);
  bind = build3 (BIND_EXPR, void_type_node, enumState_decl, exp, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);

  /* id items[16]; */
  bind = build3 (BIND_EXPR, void_type_node, items_decl, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);

  /* Generate this statement and add it to the list.  */
  /* limit = [collection countByEnumeratingWithState:&enumState objects:items count:16] */
  limit_decl_assign_expr = build2 (MODIFY_EXPR, TREE_TYPE (limit_decl),
				   limit_decl, countByEnumeratingWithState);
  bind = build3 (BIND_EXPR, void_type_node, limit_decl, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);

  /* if (limit) { */
  outer_if_stmt = begin_if_stmt ();
  /* APPLE LOCAL radar 4547045 */
  if_condition = build_binary_op (NE_EXPR, limit_decl_assign_expr,
				  fold_convert (TREE_TYPE (limit_decl),
						integer_zero_node), 1);
  finish_if_stmt_cond (if_condition, outer_if_stmt);

  /* unsigned long startMutations = *enumState.mutationsPtr; */
  exp = objc_build_component_ref (enumState_decl,
				  get_identifier ("mutationsPtr"));
  exp = build_indirect_ref (exp, "unary *");
  exp = build2 (MODIFY_EXPR, void_type_node, startMutations_decl, exp);
  bind = build3 (BIND_EXPR, void_type_node, startMutations_decl, exp, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);

  /* do { */
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */
  outer_do_stmt = begin_do_stmt (NULL_TREE);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */

  /* Body of the outer do-while loop.  */
  /* unsigned int counter = 0; */
  exp = build2 (MODIFY_EXPR, void_type_node, counter_decl,
		fold_convert (TREE_TYPE (counter_decl), integer_zero_node));
  bind = build3 (BIND_EXPR, void_type_node, counter_decl, exp, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  add_stmt (bind);

  /* do { */
  /* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */
  inner_do_stmt = begin_do_stmt (NULL_TREE);
  /* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */

  /* Body of the inner do-while loop.  */

  /* if (startMutations != *enumState.mutationsPtr)
       objc_enumerationMutation (collection); */
  inner_if_stmt = begin_if_stmt ();
  exp = objc_build_component_ref (enumState_decl,
				  get_identifier ("mutationsPtr"));
  exp = build_indirect_ref (exp, "unary *");
  if_condition = build_binary_op (NE_EXPR, startMutations_decl, exp, 1);
  finish_if_stmt_cond (if_condition, inner_if_stmt);
  add_stmt (enumerationMutation_call_exp);
  finish_then_clause (inner_if_stmt);
  finish_if_stmt (inner_if_stmt);

  /* elem = enumState.itemsPtr [counter]; */
  exp = objc_build_component_ref (enumState_decl, get_identifier ("itemsPtr"));
  exp = build_array_ref (exp, counter_decl);
  add_stmt (build2 (MODIFY_EXPR, void_type_node, elem_decl, exp));
  /* APPLE LOCAL radar 4538105 */
  TREE_USED (elem_decl) = 1;

  /* counter++; */
  exp = build2 (PLUS_EXPR, TREE_TYPE (counter_decl), counter_decl,
		build_int_cst (NULL_TREE, 1));
  add_stmt (build2 (MODIFY_EXPR, void_type_node, counter_decl, exp));

  /* ADD << stmts >> from the foreach loop.  */
  /* Parse the body of the for-statement.  */
  in_statement = parser->in_statement;
  parser->in_statement = IN_ITERATION_STMT;
  cp_parser_already_scoped_statement (parser);
  parser->in_statement = in_statement;

  finish_do_body (inner_do_stmt);
  /* } while (counter < limit ); */
  do_condition = build_binary_op (LT_EXPR, counter_decl, limit_decl, 1);
  finish_do_stmt (do_condition, inner_do_stmt);
  DO_FOREACH (inner_do_stmt) = integer_zero_node;

  /* APPLE LOCAL radar 4667060 */
  DO_FOREACH (outer_do_stmt) = elem_decl;
  finish_do_body (outer_do_stmt);
  /* } while (limit = [collection countByEnumeratingWithState:&enumState objects:items count:16]); */
  exp = unshare_expr (limit_decl_assign_expr);
  do_condition = build_binary_op (NE_EXPR, exp,
				  fold_convert (TREE_TYPE (limit_decl),
						integer_zero_node), 1);
  finish_do_stmt (do_condition, outer_do_stmt);

  finish_then_clause (outer_if_stmt);
  /* } */
  /* APPLE LOCAL begin radar 4854605 - radar 5128402 */
  begin_else_clause (outer_if_stmt);
  /* else elem = nil;  */
  add_stmt (build2 (MODIFY_EXPR, void_type_node, elem_decl,
		    fold_convert (TREE_TYPE (elem_decl), integer_zero_node)));
  finish_else_clause (outer_if_stmt);
  /* APPLE LOCAL end radar 4854605 - radar 5128402 */
  finish_if_stmt (outer_if_stmt);
  objc_finish_foreach_stmt (statement);
}
/* APPLE LOCAL end C* language */

/* APPLE LOCAL begin blocks 6040305 (ce) */
#define I_SYMBOL_BINDING(t) IDENTIFIER_BINDING(t)
tree build_component_ref (tree e, tree member);

/* Build a COMPONENT_REF for member MEMBER of object E.  MEMBER may be
   either a FIELD_DECL or an identifier to be looked up in E's type.
   Inside a template, a bare COMPONENT_REF is built; otherwise the full
   C++ member-access machinery is used.  */
tree
build_component_ref (tree e, tree member)
{
  if (!DECL_P (member))
    member = lookup_member (TREE_TYPE (e), member, 0, 0);
  if (processing_template_decl)
    return build3 (COMPONENT_REF, TREE_TYPE (member), e,
		   DECL_NAME (member), NULL_TREE);
  return build_class_member_access_expr (e, member, NULL_TREE, false);
}

/* APPLE LOCAL begin radar 6214617 */
/* Return true if EXP needs a copy helper: either the generic blocks
   test says so, or its C++ type has/needs a constructor.  */
static bool
cp_block_requires_copying (tree exp)
{
  return (block_requires_copying (exp)
	  || TYPE_HAS_CONSTRUCTOR (TREE_TYPE (exp))
	  || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (exp)));
}
/* APPLE LOCAL end radar 6214617 */

/* APPLE LOCAL begin radar 5847213 - radar 6329245 */
/** build_descriptor_block_decl - This routine builds a static
    block_descriptor variable of type:
    struct __block_descriptor;
    and initializes it to:
    {0,
     sizeof(struct literal_block_n),
     copy_helper_block_1,     // only if block BLOCK_HAS_COPY_DISPOSE
     destroy_helper_block_1,  // only if block BLOCK_HAS_COPY_DISPOSE
     // APPLE LOCAL begin radar 8143947
     // const char *signature; // the block signature set to 0
     // const char *layout;    // reserved set to 0
     // APPLE LOCAL end radar 8143947
    }
*/
static tree
build_descriptor_block_decl (tree block_struct_type, struct block_sema_info *block_impl)
{
  extern tree create_tmp_var_raw (tree, const char *);
  static int desc_unique_count;
  int size;
  tree helper_addr;
  tree decl, constructor;
  char name [32];
  VEC(constructor_elt,gc) *impl_v = NULL;
  tree descriptor_type
    = TREE_TYPE (build_block_descriptor_type (block_impl->BlockHasCopyDispose));

  sprintf (name, "__block_descriptor_tmp_%d", ++desc_unique_count);
  decl = create_tmp_var_raw (descriptor_type, name);
  DECL_CONTEXT (decl) = NULL_TREE;

  /* Initialize "reserved" field to 0 for now.  */
  CONSTRUCTOR_APPEND_ELT (impl_v, NULL_TREE,
			  build_int_cst (long_unsigned_type_node, 0));

  /* Initialize "Size" field.  */
  size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (block_struct_type));
  CONSTRUCTOR_APPEND_ELT (impl_v, NULL_TREE,
			  build_int_cst (long_unsigned_type_node, size));

  if (block_impl->BlockHasCopyDispose)
    {
      /* Initialize "CopyFuncPtr" and "DestroyFuncPtr" fields.  */
      /* Helpers were previously generated completely as a nested
	 function (and context was required for code gen.)  But they
	 are not, so context must be set to NULL so initialization
	 logic does not complain.  */
      DECL_CONTEXT (block_impl->copy_helper_func_decl) = NULL_TREE;
      helper_addr = build_fold_addr_expr (block_impl->copy_helper_func_decl);
      helper_addr = convert (ptr_type_node, helper_addr);
      CONSTRUCTOR_APPEND_ELT (impl_v, NULL_TREE, helper_addr);

      DECL_CONTEXT (block_impl->destroy_helper_func_decl) = NULL_TREE;
      helper_addr = build_fold_addr_expr (block_impl->destroy_helper_func_decl);
      helper_addr = convert (ptr_type_node, helper_addr);
      CONSTRUCTOR_APPEND_ELT (impl_v, NULL_TREE, helper_addr);
    }

  /* APPLE LOCAL begin radar 8143947 */
  /* signature field is set to 0 */
  CONSTRUCTOR_APPEND_ELT (impl_v, NULL_TREE,
			  build_int_cst (build_pointer_type (char_type_node), 0));
  /* layout field is set to 0 */
  CONSTRUCTOR_APPEND_ELT (impl_v, NULL_TREE,
			  build_int_cst (build_pointer_type (char_type_node), 0));
  /* APPLE LOCAL end radar 8143947 */

  /* Create a CONSTRUCTOR to represent the braced-initializer.  */
  constructor = make_node (CONSTRUCTOR);
  CONSTRUCTOR_ELTS (constructor) = impl_v;
  TREE_PUBLIC (decl) = 0;
  TREE_STATIC (decl) = 1;
  cp_finish_decl (decl, constructor, 0, 0, LOOKUP_ONLYCONVERTING);
  return decl;
}

/* APPLE LOCAL begin radar 6300081 */
/* This function builds a "generic" block struct type, to be passed
   into the debug information for blocks pointers, to allow gdb to
   find the actual function pointer for the block.  Any time the
   Blocks structure layout changes, this may also need to change.
   Currently a block pointer is a pointer to a __block_literal_n struct, the
   third field of which is a pointer to a __block_descriptor struct, whose
   third field is the function pointer.  There are other fields as well, but
   these are the ones gdb needs to know about to find the function pointer.
   Therefore a generic block struct currently looks like this:

   struct __block_literal_generic {
      void * __isa;
      int __flags;
      int __reserved;
      void *__FuncPtr;
      struct __block_descriptor {
        unsigned long int reserved;
        unsigned long int Size;
        // APPLE LOCAL begin radar 8143927
        const char *signature;   // the block signature
        const char *layout;      // reserved
        // APPLE LOCAL end radar 8143927
      } *__descriptor;
   };

   IF AT ANY TIME THE STRUCTURE OF A __BLOCK_LITERAL_N CHANGES, THIS MUST BE
   CHANGED ALSO!!  */
tree
/* APPLE LOCAL radar 6353006 */
c_build_generic_block_struct_type (void)
{
  tree fields = NULL_TREE;
  tree field;
  tree block_struct_type;
  /* The type must be created at file scope, independent of whatever
     function is currently being compiled.  */
  push_to_top_level ();
  block_struct_type = xref_tag (record_type,
                                get_identifier ("__block_literal_generic"),
                                ts_current, false);
  xref_basetypes (block_struct_type, NULL_TREE);
  CLASSTYPE_DECLARED_CLASS (block_struct_type) = 0;
  pushclass (block_struct_type);
  /* Fields are chained in reverse; finish_struct handles the ordering.  */
  field = build_decl (FIELD_DECL, get_identifier ("__isa"), ptr_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  field = build_decl (FIELD_DECL, get_identifier ("__flags"),
                      integer_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  field = build_decl (FIELD_DECL, get_identifier ("__reserved"),
                      integer_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  field = build_decl (FIELD_DECL, get_identifier ("__FuncPtr"),
                      ptr_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  /* Generic descriptor: no copy/dispose helper slots (argument false).  */
  field = build_decl (FIELD_DECL, get_identifier ("__descriptor"),
                      build_block_descriptor_type (false));
  TREE_CHAIN (field) = fields;
  fields = field;
  TYPE_FIELDS (block_struct_type) = fields;
  TYPE_NAME (block_struct_type) =
    build_decl (TYPE_DECL, get_identifier ("__block_literal_generic"),
                block_struct_type);
  TYPE_STUB_DECL (block_struct_type) = TYPE_NAME (block_struct_type);
  TYPE_BLOCK_IMPL_STRUCT (block_struct_type) = 1;
  finish_struct (block_struct_type, NULL_TREE);
  pop_from_top_level ();
  return block_struct_type;
}
/* APPLE LOCAL end radar 6300081 */

/** build_block_struct_type -
 struct __block_literal_n {
  void *__isa; // initialized to &_NSConcreteStackBlock or
               // &_NSConcreteGlobalBlock
  int __flags;
  int __reserved;
  void *__FuncPtr;

  struct __block_descriptor {
    unsigned long int reserved;  // NULL
    unsigned long int Size;  // sizeof(struct __block_literal_n)

    // optional helper functions
    void *CopyFuncPtr;     // When BLOCK_HAS_COPY_DISPOSE
    void *DestroyFuncPtr;  // When BLOCK_HAS_COPY_DISPOSE
  } *__descriptor;

  // imported variables
  int x;   // ref variable list ...
  int *y;  // byref variable list
 };
 Lays out the per-literal struct for the block described by BLOCK_IMPL,
 appending one field per captured ("ref") and per __block ("byref") variable.
 Side effects: may set BlockHasCopyDispose / BlockImportsCxxObjects on
 BLOCK_IMPL (and BlockHasCopyDispose on its enclosing block).  */
static tree build_block_struct_type (struct block_sema_info * block_impl)
{
  tree fields = NULL_TREE, field, chain;
  char buffer[32];
  /* Per-translation-unit counter: each literal type gets a unique tag.  */
  static int unique_count;
  tree block_struct_type;

  /* Check and see if this block is required to have a Copy/Dispose
     helper function.  If yes, set BlockHasCopyDispose to TRUE.  */
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    /* APPLE LOCAL begin radar 6214617 */
    if (cp_block_requires_copying (TREE_VALUE (chain)))
      {
        tree type = TREE_TYPE (TREE_VALUE (chain));
        block_impl->BlockHasCopyDispose = TRUE;
        /* Keep scanning unless a constructed C++ object is found; that
           additionally flags BlockImportsCxxObjects and ends the search.  */
        if (TYPE_HAS_CONSTRUCTOR (type) || TYPE_NEEDS_CONSTRUCTING (type))
          {
            block_impl->BlockImportsCxxObjects = TRUE;
            break;
          }
        /* APPLE LOCAL end radar 6214617 */
      }

  /* Further check to see that we have __block variables which require
     Copy/Dispose helpers.  */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (COPYABLE_BYREF_LOCAL_VAR (TREE_VALUE (chain)))
      {
        block_impl->BlockHasCopyDispose = TRUE;
        break;
      }

  sprintf(buffer, "__block_literal_%d", ++unique_count);
  /* Like the generic type above, the literal struct lives at file scope.  */
  push_to_top_level ();
  /* APPLE LOCAL begin radar 6243400 */
  block_struct_type = xref_tag (record_type, get_identifier (buffer),
                                ts_current, false);
  xref_basetypes (block_struct_type, NULL_TREE);
  CLASSTYPE_DECLARED_CLASS (block_struct_type) = 0;
  pushclass (block_struct_type);
  /* APPLE LOCAL end radar 6243400 */
  /* void * __isa; */
  field = build_decl (FIELD_DECL, get_identifier ("__isa"), ptr_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  /* int __flags. */
  field = build_decl (FIELD_DECL, get_identifier ("__flags"),
                      integer_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  /* int __reserved. */
  field = build_decl (FIELD_DECL, get_identifier ("__reserved"),
                      integer_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  /* void *__FuncPtr. */
  field = build_decl (FIELD_DECL, get_identifier ("__FuncPtr"),
                      ptr_type_node);
  TREE_CHAIN (field) = fields;
  fields = field;
  /* struct __block_descriptor *__descriptor */
  field = build_decl (FIELD_DECL, get_identifier ("__descriptor"),
                      build_block_descriptor_type (block_impl->BlockHasCopyDispose));
  TREE_CHAIN (field) = fields;
  fields = field;
  if (block_impl->BlockHasCopyDispose)
    {
      /* If inner block of a nested block has BlockHasCopyDispose, so
         does its outer block.  */
      if (block_impl->prev_block_info)
        block_impl->prev_block_info->BlockHasCopyDispose = TRUE;
    }
  /* int x; // ref variable list ... */
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree p = TREE_VALUE (chain);
      /* Note! const-ness of copied in variable must not be carried over to
         the type of the synthesized struct field.  It prevents to assign
         to this field when copy constructor is synthesized.  */
      field = build_decl (FIELD_DECL, DECL_NAME (p),
                          c_build_qualified_type (TREE_TYPE (p),
                                                  TYPE_UNQUALIFIED));
      TREE_CHAIN (field) = fields;
      fields = field;
    }
  /* int *y; // byref variable list */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree p = TREE_VALUE (chain);
      field = build_decl (FIELD_DECL, DECL_NAME (p), TREE_TYPE (p));
      TREE_CHAIN (field) = fields;
      fields = field;
    }
  /* APPLE LOCAL begin radar 6243400 */
  TYPE_FIELDS (block_struct_type) = fields;
  TYPE_NAME (block_struct_type) =
    build_decl (TYPE_DECL, get_identifier (buffer), block_struct_type);
  TYPE_STUB_DECL (block_struct_type) = TYPE_NAME (block_struct_type);
  finish_struct (block_struct_type, NULL_TREE);
  pop_from_top_level ();
  /* APPLE LOCAL end radar 6243400 */
  return block_struct_type;
}

/** build_block_struct_initlist - builds the initializer list:
 { &_NSConcreteStackBlock or &_NSConcreteGlobalBlock  // __isa,
   BLOCK_USE_STRET | BLOCK_HAS_COPY_DISPOSE | BLOCK_IS_GLOBAL // __flags,
     | BLOCK_HAS_SIGNATURE                                    // __flags
   0,                            // __reserved,
   &helper_1,                    // __FuncPtr,
   &static_descriptor_variable   // __descriptor,
   x,                            // user variables.
   &y
   ...
 }
 Returns the CONSTRUCTOR element vector for the __block_literal_n temporary
 of type BLOCK_STRUCT_TYPE described by BLOCK_IMPL.  */
/* APPLE LOCAL begin radar 6169527 */
/* This routine is entirely rewritten as we now have to deal with full-blown
   c++ classes with fields which may require construction.  */
static VEC(constructor_elt,gc) *
build_block_struct_initlist (tree block_struct_type,
                             struct block_sema_info *block_impl)
{
  tree expr, chain, helper_addr;
  /* APPLE LOCAL radar 7735196 */
  unsigned flags = BLOCK_HAS_SIGNATURE;
  /* The _NSConcrete{Stack,Global}Block decls are looked up (or created)
     once and cached for the whole translation unit.  */
  static tree NSConcreteStackBlock_decl = NULL_TREE;
  static tree NSConcreteGlobalBlock_decl = NULL_TREE;
  VEC(constructor_elt,gc) *impl_v = NULL;
  tree descriptor_block_decl =
    build_descriptor_block_decl (block_struct_type, block_impl);

  if (block_impl->BlockHasCopyDispose)
    /* Note! setting of this flag merely indicates to the runtime that
       we have destroy_helper_block/copy_helper_block helper routines.  */
    flags |= BLOCK_HAS_COPY_DISPOSE;
  /* APPLE LOCAL begin radar 6214617 */
  /* Set BLOCK_HAS_CXX_OBJ if block is importing a cxx object.  */
  if (block_impl->BlockImportsCxxObjects)
    flags |= BLOCK_HAS_CXX_OBJ;
  /* APPLE LOCAL end radar 6214617 */
  /* APPLE LOCAL begin radar 7735196 */
  if (block_impl->return_type && aggregate_value_p(block_impl->return_type, 0))
    flags |= BLOCK_USE_STRET;
  /* APPLE LOCAL end 7735196 */
  /* APPLE LOCAL begin radar 6230297 */
  /* A block is "global" when at file scope or when it captures nothing.  */
  if (!current_function_decl
      || (block_impl->block_ref_decl_list == NULL_TREE
          && block_impl->block_byref_decl_list == NULL_TREE))
  /* APPLE LOCAL end radar 6230297 */
    {
      /* This is a global block.  */
      /* Find an existing declaration for _NSConcreteGlobalBlock or declare
         extern void *_NSConcreteGlobalBlock; */
      if (NSConcreteGlobalBlock_decl == NULL_TREE)
        {
          tree name_id = get_identifier("_NSConcreteGlobalBlock");
          NSConcreteGlobalBlock_decl = lookup_name (name_id);
          if (!NSConcreteGlobalBlock_decl)
            {
              NSConcreteGlobalBlock_decl = build_decl (VAR_DECL, name_id,
                                                       ptr_type_node);
              DECL_EXTERNAL (NSConcreteGlobalBlock_decl) = 1;
              TREE_PUBLIC (NSConcreteGlobalBlock_decl) = 1;
              pushdecl_top_level (NSConcreteGlobalBlock_decl);
              rest_of_decl_compilation (NSConcreteGlobalBlock_decl, 0, 0);
            }
        }
      /* APPLE LOCAL begin radar 6457359 */
      CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE,
                             convert (ptr_type_node,
                                      build_fold_addr_expr (NSConcreteGlobalBlock_decl)));
      /* APPLE LOCAL end radar 6457359 */
      flags |= BLOCK_IS_GLOBAL;
    }
  else
    {
      /* Find an existing declaration for _NSConcreteStackBlock or declare
         extern void *_NSConcreteStackBlock; */
      if (NSConcreteStackBlock_decl == NULL_TREE)
        {
          tree name_id = get_identifier("_NSConcreteStackBlock");
          NSConcreteStackBlock_decl = lookup_name (name_id);
          if (!NSConcreteStackBlock_decl)
            {
              NSConcreteStackBlock_decl = build_decl (VAR_DECL, name_id,
                                                      ptr_type_node);
              DECL_EXTERNAL (NSConcreteStackBlock_decl) = 1;
              TREE_PUBLIC (NSConcreteStackBlock_decl) = 1;
              pushdecl_top_level (NSConcreteStackBlock_decl);
              rest_of_decl_compilation (NSConcreteStackBlock_decl, 0, 0);
            }
        }
      /* APPLE LOCAL begin radar 6457359 */
      CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE,
                             convert (ptr_type_node,
                                      build_fold_addr_expr (NSConcreteStackBlock_decl)));
      /* APPLE LOCAL end radar 6457359 */
    }
  /* __flags */
  CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE,
                         build_int_cst (integer_type_node, flags));
  /* __reserved */
  CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE,
                         build_int_cst (integer_type_node, 0));
  /* __FuncPtr */
  helper_addr = build_fold_addr_expr (block_impl->helper_func_decl);
  helper_addr = convert (ptr_type_node, helper_addr);
  CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE, helper_addr);
  /* &static_descriptor_variable initializer */
  expr = build_fold_addr_expr (descriptor_block_decl);
  CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE, expr);
  /* Copied-in ("ref") variables are initialized from their original decls.  */
  for (chain = block_impl->block_original_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree y = TREE_VALUE (chain);
      TREE_USED (y) = 1;
      CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE, y);
    }
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    {
      tree y = lookup_name (DECL_NAME (TREE_VALUE (chain)));
      tree forwarding_expr;
      gcc_assert (y);
      TREE_USED (y) = 1;
      if (COPYABLE_BYREF_LOCAL_VAR (y))
        {
          /* For variables declared __block, either the original one at the
             point of declaration or the imported version (which is
             initialized in the helper function's prologue) is used to
             initilize the byref variable field in the temporary.  */
          if (TREE_CODE (TREE_TYPE (y)) != RECORD_TYPE)
            y = build_indirect_ref (y, "unary *");
          /* We will be using the __block_struct_variable.__forwarding as
             the initializer.  */
          forwarding_expr = build_component_ref (y,
                                                 get_identifier ("__forwarding"));
        }
      else
        /* Global variable is always assumed passed by its address.  */
        forwarding_expr = build_fold_addr_expr (y);
      CONSTRUCTOR_APPEND_ELT(impl_v, NULL_TREE, forwarding_expr);
    }
  return impl_v;
}
/* APPLE LOCAL end radar 6169527 */
/* APPLE LOCAL end radar 5847213 - radar 6329245 */

/** build_block_literal_tmp - This routine:
 1) builds block type:
 struct __block_literal_n {
  void *__isa; // initialized to &_NSConcreteStackBlock or
               // &_NSConcreteGlobalBlock
  int __flags;
  int __reserved;
  void *__FuncPtr;
  struct __block_descriptor {
    unsigned long int reserved;  // NULL
    unsigned long int Size;  // sizeof(struct Block_literal_1)
    // optional helper functions
    void *CopyFuncPtr;     // When BLOCK_HAS_COPY_DISPOSE
    void *DestroyFuncPtr;  // When BLOCK_HAS_COPY_DISPOSE
  } *__descriptor;
  // imported variables
  int x;   // ref variable list ...
  int *y;  // byref variable list
 };
 2) build function prototype:
 double helper_1(struct block_1 *ii, int z);
 3) build the temporary initialization:
 struct block_1 I = {
   { &_NSConcreteStackBlock or &_NSConcreteGlobalBlock  // isa,
     BLOCK_HAS_CXX_OBJ | BLOCK_HAS_COPY_DISPOSE | BLOCK_IS_GLOBAL // flags,
     0,  // reserved,
     &helper_1,
     &{ NULL,
        sizeof(struct block_1),
        copy_helper_block_1,     // only if block BLOCK_HAS_COPY_DISPOSE
        destroy_helper_block_1,  // only if block BLOCK_HAS_COPY_DISPOSE
      },
     x,
     &y
   };
 It return the temporary.  */
/* APPLE LOCAL begin radar 6169527 */
static tree build_block_literal_tmp (const char *name,
                                     struct block_sema_info * block_impl)
{
  extern tree create_tmp_var_raw (tree, const char *);
  tree block_holder_tmp_decl;
  tree constructor;
  tree block_struct_type = TREE_TYPE (block_impl->block_arg_ptr_type);
  /* APPLE LOCAL begin radar 6230297 */
  /* A block capturing nothing can live in static storage.  */
  bool staticBlockTmp = (block_impl->block_ref_decl_list == NULL_TREE
                         && block_impl->block_byref_decl_list == NULL_TREE);
  block_holder_tmp_decl = create_tmp_var_raw (block_struct_type, name);
  /* Context will not be known until when the literal is synthesized.
     This is more so in the case of nested block literal blocks.  */
  maybe_push_decl (block_holder_tmp_decl);
  DECL_CONTEXT (block_holder_tmp_decl) =
    staticBlockTmp ? NULL_TREE : current_function_decl;
  if (staticBlockTmp)
    DECL_CONTEXT (block_impl->helper_func_decl) = NULL_TREE;
  /* APPLE LOCAL end radar 6230297 */
  DECL_ARTIFICIAL (block_holder_tmp_decl) = 1;
  /* Create a CONSTRUCTOR to represent the braced-initializer.  */
  constructor = make_node (CONSTRUCTOR);
  CONSTRUCTOR_ELTS (constructor) =
    build_block_struct_initlist (block_struct_type, block_impl);
  /* Temporary representing a global block is made global static.  */
  /* APPLE LOCAL radar 6230297 */
  if (staticBlockTmp || global_bindings_p ())
    {
      TREE_PUBLIC (block_holder_tmp_decl) = 0;
      TREE_STATIC (block_holder_tmp_decl) = 1;
    }
  cp_finish_decl (block_holder_tmp_decl, constructor, 0, 0,
                  LOOKUP_ONLYCONVERTING);
  return block_holder_tmp_decl;
}
/* APPLE LOCAL end radar 6169527 */

/* Error-recovery path for block-literal parsing: undoes the function and
   language context pushes performed by the caller, releases the per-block
   semantic info (finish_block's result is heap-allocated — see the matching
   free of finish_block's result in cp_parser_block_literal_expr), and yields
   error_mark_node.  */
static tree clean_and_exit (tree block)
{
  pop_function_context ();
  pop_lang_context ();
  if (current_function_decl)
    free (finish_block (block));
  return error_mark_node;
}

/** synth_copy_helper_block_func - This function synthesizes
    void copy_helper_block (struct block* _dest, struct block *_src)
    function.  For each copied-in object it emits either a C++
    copy-construction (build_aggr_init) or a _Block_object_assign call, and
    for each __block variable a _Block_object_assign with BLOCK_FIELD_IS_BYREF.
    Note: reads the helper FUNCTION_DECL through the global cur_block while
    taking the decl lists from BLOCK_IMPL.  */
static void synth_copy_helper_block_func (struct block_sema_info * block_impl)
{
  tree stmt, chain;
  tree dst_arg, src_arg;
  /* struct c_arg_info * arg_info; */
  /* Set up: (struct block* _dest, struct block *_src) parameters.  */
  dst_arg = build_decl (PARM_DECL, get_identifier ("_dst"),
                        block_impl->block_arg_ptr_type);
  DECL_CONTEXT (dst_arg) = cur_block->copy_helper_func_decl;
  TREE_USED (dst_arg) = 1;
  DECL_ARG_TYPE (dst_arg) = block_impl->block_arg_ptr_type;
  src_arg = build_decl (PARM_DECL, get_identifier ("_src"),
                        block_impl->block_arg_ptr_type);
  DECL_CONTEXT (src_arg) = cur_block->copy_helper_func_decl;
  TREE_USED (src_arg) = 1;
  DECL_ARG_TYPE (src_arg) = block_impl->block_arg_ptr_type;
  /* arg_info = xcalloc (1, sizeof (struct c_arg_info)); */
  TREE_CHAIN (dst_arg) = src_arg;
  pushdecl (cur_block->copy_helper_func_decl);
  /* arg_info->parms = dst_arg; */
  /* arg_info->types = tree_cons (NULL_TREE, block_impl->block_arg_ptr_type,
                                  tree_cons (NULL_TREE,
                                             block_impl->block_arg_ptr_type,
                                             NULL_TREE)); */
  DECL_ARGUMENTS (cur_block->copy_helper_func_decl) = dst_arg;
  /* function header synthesis.  */
  push_function_context ();
  /* start_block_helper_function (cur_block->copy_helper_func_decl, true); */
  /* store_parm_decls (arg_info); */
  start_preparsed_function (cur_block->copy_helper_func_decl,
                            /*attrs*/NULL_TREE,
                            SF_PRE_PARSED);
  /* Body of the function.  */
  stmt = begin_compound_stmt (BCS_FN_BODY);
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    /* APPLE LOCAL radar 6214617 */
    if (cp_block_requires_copying (TREE_VALUE (chain)))
      {
        /* APPLE LOCAL begin radar 6175959 */
        int flag = 0;
        tree p = TREE_VALUE (chain);
        tree dst_block_component, src_block_component;
        dst_block_component = build_component_ref (build_indirect_ref (dst_arg, "->"),
                                                   DECL_NAME (p));
        src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
                                                   DECL_NAME (p));
        if (TREE_CODE (TREE_TYPE (p)) == BLOCK_POINTER_TYPE)
          /* _Block_object_assign(&_dest->myImportedBlock,
                                  _src->myImportedClosure,
                                  BLOCK_FIELD_IS_BLOCK) */
          flag = BLOCK_FIELD_IS_BLOCK;
        /* APPLE LOCAL begin radar 6214617 */
        else if (TYPE_HAS_CONSTRUCTOR (TREE_TYPE (p))
                 || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (p)))
          {
            /* C++ object: run its copy construction instead of the runtime
               assign helper (flag stays 0, so no runtime call below).  */
            tree call_exp = build_aggr_init (dst_block_component,
                                             src_block_component,
                                             LOOKUP_ONLYCONVERTING);
            add_stmt (call_exp);
          }
        /* APPLE LOCAL end radar 6214617 */
        else
          /* _Block_object_assign(&_dest->myImportedBlock,
                                  _src->myImportedClosure,
                                  BLOCK_FIELD_IS_OBJECT) */
          flag = BLOCK_FIELD_IS_OBJECT;
        if (flag)
          {
            tree call_exp;
            dst_block_component = build_fold_addr_expr (dst_block_component);
            call_exp = build_block_object_assign_call_exp (dst_block_component,
                                                           src_block_component,
                                                           flag);
            add_stmt (call_exp);
          }
        /* APPLE LOCAL end radar 6175959 */
      }
  /* For each __block declared variable used in |...|
     Must generate call to:
     _Block_object_assign(&_dest->myImportedBlock, _src->myImportedBlock,
                          BLOCK_FIELD_IS_BYREF [|BLOCK_FIELD_IS_WEAK]) */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (COPYABLE_BYREF_LOCAL_VAR (TREE_VALUE (chain)))
      {
        int flag = BLOCK_FIELD_IS_BYREF;
        tree call_exp;
        tree p = TREE_VALUE (chain);
        tree dst_block_component, src_block_component;
        dst_block_component = build_component_ref (build_indirect_ref (dst_arg, "->"),
                                                   DECL_NAME (p));
        src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
                                                   DECL_NAME (p));
        /* _Block_object_assign(&_dest->myImportedClosure,
                                _src->myImportedClosure,
                                BLOCK_FIELD_IS_BYREF [|BLOCK_FIELD_IS_WEAK]) */
        if (COPYABLE_WEAK_BLOCK (p))
          flag |= BLOCK_FIELD_IS_WEAK;
        dst_block_component = build_fold_addr_expr (dst_block_component);
        call_exp = build_block_object_assign_call_exp (dst_block_component,
                                                       src_block_component,
                                                       flag);
        add_stmt (call_exp);
      }
  finish_compound_stmt (stmt);
  /* APPLE LOCAL radar 6169580 */
  finish_function (4);
  /* Hum, would be nice if someone else did this for us.  */
  if (global_bindings_p ())
    cgraph_finalize_function (block_impl->copy_helper_func_decl, false);
  pop_function_context ();
  /* free (arg_info); */
}

/* Synthesize void destroy_helper_block (struct block *_src): the dispose
   counterpart of the copy helper above.  For each captured object it emits
   either a C++ destructor cleanup (cxx_maybe_build_cleanup) or a
   _Block_object_dispose call, and a BYREF dispose for each __block
   variable.  Mirrors synth_copy_helper_block_func's cur_block usage.  */
static void synth_destroy_helper_block_func (struct block_sema_info * block_impl)
{
  tree stmt, chain;
  tree src_arg;
  /* struct c_arg_info * arg_info; */
  /* Set up: (struct block *_src) parameter.  */
  src_arg = build_decl (PARM_DECL, get_identifier ("_src"),
                        block_impl->block_arg_ptr_type);
  DECL_CONTEXT (src_arg) = cur_block->destroy_helper_func_decl;
  TREE_USED (src_arg) = 1;
  DECL_ARG_TYPE (src_arg) = block_impl->block_arg_ptr_type;
  /* arg_info = xcalloc (1, sizeof (struct c_arg_info)); */
  pushdecl (cur_block->destroy_helper_func_decl);
  /* arg_info->parms = src_arg; */
  /* arg_info->types = tree_cons (NULL_TREE, block_impl->block_arg_ptr_type,
                                  NULL_TREE); */
  DECL_ARGUMENTS (cur_block->destroy_helper_func_decl) = src_arg;
  /* function header synthesis.  */
  push_function_context ();
  /* start_block_helper_function (cur_block->destroy_helper_func_decl, true); */
  /* store_parm_decls_from (arg_info); */
  start_preparsed_function (cur_block->destroy_helper_func_decl,
                            /*attrs*/NULL_TREE,
                            SF_PRE_PARSED);
  /* Body of the function.  */
  stmt = begin_compound_stmt (BCS_FN_BODY);
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    /* APPLE LOCAL begin radar 6214617 */
    if (block_requires_copying (TREE_VALUE (chain))
        || (TREE_CODE (TREE_TYPE (TREE_VALUE (chain))) == RECORD_TYPE
            && CLASSTYPE_DESTRUCTORS (TREE_TYPE (TREE_VALUE (chain)))))
    /* APPLE LOCAL end radar 6214617 */
      {
        int flag = 0;
        tree rel_exp;
        tree p = TREE_VALUE (chain);
        tree src_block_component;
        src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
                                                   DECL_NAME (p));
        if (TREE_CODE (TREE_TYPE (p)) == BLOCK_POINTER_TYPE)
          /* _Block_object_dispose(_src->imported_object_0,
                                   BLOCK_FIELD_IS_BLOCK); */
          flag = BLOCK_FIELD_IS_BLOCK;
        /* APPLE LOCAL begin radar 6214617 */
        else if (TREE_CODE (TREE_TYPE (p)) == RECORD_TYPE
                 && CLASSTYPE_DESTRUCTORS (TREE_TYPE (p)))
          {
            /* C++ object with a destructor: run it directly (flag stays 0,
               so no runtime dispose call below).  */
            tree call_exp = cxx_maybe_build_cleanup (src_block_component);
            gcc_assert (call_exp);
            add_stmt (call_exp);
          }
        /* APPLE LOCAL end radar 6214617 */
        else
          /* _Block_object_dispose(_src->imported_object_0,
                                   BLOCK_FIELD_IS_OBJECT); */
          flag = BLOCK_FIELD_IS_OBJECT;
        if (flag)
          {
            rel_exp = build_block_object_dispose_call_exp (src_block_component,
                                                           flag);
            add_stmt (rel_exp);
          }
      }
  /* For each __block declared variable used in |...|
     Must generate call to:
     _Block_object_dispose(_src->myImportedClosure,
                           BLOCK_FIELD_IS_BYREF[|BLOCK_FIELD_IS_WEAK]) */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    if (COPYABLE_BYREF_LOCAL_VAR (TREE_VALUE (chain)))
      {
        tree call_exp;
        int flag = BLOCK_FIELD_IS_BYREF;
        tree p = TREE_VALUE (chain);
        tree src_block_component;
        src_block_component = build_component_ref (build_indirect_ref (src_arg, "->"),
                                                   DECL_NAME (p));
        if (COPYABLE_WEAK_BLOCK (p))
          flag |= BLOCK_FIELD_IS_WEAK;
        /* _Block_object_dispose(_src->myImportedClosure,
                                 BLOCK_FIELD_IS_BYREF[|BLOCK_FIELD_IS_WEAK]) */
        call_exp = build_block_object_dispose_call_exp (src_block_component,
                                                        flag);
        add_stmt (call_exp);
      }
  finish_compound_stmt (stmt);
  /* APPLE LOCAL radar 6169580 */
  finish_function (4);
  /* Hum, would be nice if someone else did this for us.  */
  if (global_bindings_p ())
    cgraph_finalize_function (block_impl->destroy_helper_func_decl, false);
  pop_function_context ();
}

/* Parse a block-id.

   GNU Extension:

   block-id:
     type-specifier-seq block-declarator

   Returns the DECL specified or implied (error_mark_node on a bad
   type-specifier-seq).  */
static tree cp_parser_block_id (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *declarator;
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
                                &type_specifier_seq);
  if (type_specifier_seq.type == error_mark_node)
    return error_mark_node;
  /* Look for the block-declarator.  */
  declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_BLOCK,
                                     NULL, /*parenthesized_p=*/NULL,
                                     /*member_p=*/false);
  return grokblockdecl (&type_specifier_seq, declarator);
}

/* Parse a block-literal-expr.

   GNU Extension:

   block-literal-expr:
     ^ parameter-declation-clause exception-specification [opt]
       compound-statement
     ^ block-id compound-statement

   It synthesizes the helper function for later generation and builds
   the necessary data to represent the block literal where it is
   declared.
*/ static tree cp_parser_block_literal_expr (cp_parser* parser) { char name [32]; static int global_unique_count; int unique_count = ++global_unique_count; tree block_helper_function_decl; tree expr, type, arglist = NULL_TREE, ftype; tree self_arg, stmt; /* struct c_arg_info *args = NULL; */ cp_parameter_declarator *args = NULL; tree arg_type = void_list_node; struct block_sema_info *block_impl; tree tmp; tree restype; tree typelist; tree helper_function_type; tree block; /* APPLE LOCAL radar 6185344 */ tree declared_block_return_type = NULL_TREE; /* APPLE LOCAL radar 6237713 */ tree attributes = NULL_TREE; /* APPLE LOCAL radar 6169580 */ int context_is_nonstatic_method; tree raises = NULL_TREE; cp_lexer_consume_token (parser->lexer); /* eat '^' */ /* APPLE LOCAL begin radar 6237713 */ if (cp_lexer_peek_token (parser->lexer)->keyword == RID_ATTRIBUTE) attributes = cp_parser_attributes_opt (parser); /* APPLE LOCAL end radar 6237713 */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { /* Parse the optional argument list */ cp_lexer_consume_token (parser->lexer); /* Open the scope to collect parameter decls */ /* push_scope (); */ /* args = c_parser_parms_declarator (parser, true, NULL_TREE); */ /* Parse the parameter-declaration-clause. */ args = cp_parser_parameter_declaration_clause (parser); cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"); arg_type = grokparms (args, &arglist); /* Check for args as it might be NULL due to error. */ if (! args) { return error_mark_node; } raises = cp_parser_exception_specification_opt (parser); } /* APPLE LOCAL begin radar 6185344 */ else if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) { /* Parse user declared return type. 
*/ tree decl; /* APPLE LOCAL begin radar 6237713 */ if (attributes) { warning (0, "attributes before block type are ignored"); attributes = NULL_TREE; } /* APPLE LOCAL end radar 6237713 */ decl = cp_parser_block_id (parser); if (decl && decl != error_mark_node) { arg_type = TYPE_ARG_TYPES (TREE_TYPE (decl)); arglist = DECL_ARGUMENTS (decl); raises = TYPE_RAISES_EXCEPTIONS (TREE_TYPE (decl)); declared_block_return_type = TREE_TYPE (TREE_TYPE (decl)); } } /* APPLE LOCAL end radar 6185344 */ block = begin_block (); /* APPLE LOCAL begin radar 6169580 */ context_is_nonstatic_method = (current_function_decl && DECL_NONSTATIC_MEMBER_FUNCTION_P (current_function_decl)); /* APPLE LOCAL end radar 6169580 */ /* cur_block->arg_info = NULL; */ /* APPLE LOCAL begin radar 6185344 */ if (declared_block_return_type) { cur_block->return_type = TYPE_MAIN_VARIANT (declared_block_return_type); cur_block->block_has_return_type = true; } else cur_block->return_type = NULL_TREE; /* APPLE LOCAL end radar 6185344 */ /* Must also build hidden parameter .block_descriptor added to the helper function, even though we do not know its type yet. */ /* APPLE LOCAL radar 6404979 */ self_arg = build_artificial_parm (get_identifier (".block_descriptor"), ptr_type_node); /* TREE_CHAIN (self_arg) = cur_block->arg_info->parms; */ TREE_CHAIN (self_arg) = arglist; arg_type = tree_cons (NULL_TREE, ptr_type_node, arg_type); arglist = self_arg; /* APPLE LOCAL begin radar 6185344 */ /* Build the declaration of the helper function (if we do not know its result type yet, assume it is 'void'. If user provided it, use it). Treat this as a nested function and use nested function infrastructure for its generation. */ push_lang_context (lang_name_c); ftype = build_function_type ((!cur_block->block_has_return_type ? 
void_type_node : cur_block->return_type), arg_type); /* APPLE LOCAL end radar 6185344 */ if (raises) ftype = build_exception_variant (ftype, raises); /* APPLE LOCAL radar 6160536 */ block_helper_function_decl = build_helper_func_decl (build_block_helper_name (unique_count), ftype); DECL_CONTEXT (block_helper_function_decl) = current_function_decl; /* LLVM LOCAL begin 6530487 - blocks helper functions never need a static chain */ #ifdef ENABLE_LLVM DECL_NO_STATIC_CHAIN (block_helper_function_decl) = 1; #endif /* LLVM LOCAL end 6530487 - blocks helper functions never need a static chain */ cur_block->helper_func_decl = block_helper_function_decl; DECL_ARGUMENTS (block_helper_function_decl) = arglist; push_function_context (); /* start_block_helper_function (cur_block->helper_func_decl, false); */ /* Enter parameter list to the scope of the helper function. */ /* store_parm_decls_from (cur_block->arg_info); */ start_preparsed_function (cur_block->helper_func_decl, /*attrs*/NULL_TREE, SF_PRE_PARSED); /* APPLE LOCAL begin radar 6237713 */ if (cp_lexer_peek_token (parser->lexer)->keyword == RID_ATTRIBUTE) attributes = cp_parser_attributes_opt (parser); /* APPLE LOCAL radar 6246527 */ any_recognized_block_attribute (attributes); decl_attributes (&cur_block->helper_func_decl, attributes, 0); /* APPLE LOCAL end radar 6237713 */ /* Start parsing body or expression part of the block literal. */ { unsigned save = parser->in_statement; /* Indicate no valid break/continue context. We'll notice and emit the proper error message in c_finish_bc_stmt. */ parser->in_statement = 0; stmt = begin_compound_stmt (BCS_FN_BODY); /* Set block's scope to the scope of the helper function's main body. This is primarily used when nested blocks are declared. 
*/ cur_block->cp_the_scope = current_binding_level; /* APPLE LOCAL begin radar 6169580 */ if (context_is_nonstatic_method) { tree this_decl = lookup_name (this_identifier); gcc_assert (this_decl); build_block_ref_decl (this_identifier, this_decl); } /* APPLE LOCAL end radar 6169580 */ cp_parser_compound_statement (parser, NULL, false, false); parser->in_statement = save; } cur_block->block_arg_ptr_type = build_pointer_type (build_block_struct_type (cur_block)); restype = !cur_block->return_type ? void_type_node : cur_block->return_type; if (restype == error_mark_node) return clean_and_exit (block); /* Now that we know type of the hidden .block_descriptor argument, fix its type. */ TREE_TYPE (self_arg) = cur_block->block_arg_ptr_type; DECL_ARG_TYPE (self_arg) = cur_block->block_arg_ptr_type; /* The DECL_RESULT should already have the correct type by now. */ gcc_assert (TREE_TYPE (DECL_RESULT (current_function_decl)) == restype); cur_block->block_body = stmt; block_build_prologue (cur_block); finish_compound_stmt (stmt); /* add_stmt (fnbody); */ /* We are done parsing of the block body. Return type of block is now known. We also know all we need to know about the helper function. So, fix its type here. */ /* We moved this here because for global blocks, helper function body is not nested and is gimplified in call to finish_function() and return type of the function must be correct. */ ftype = build_function_type (restype, TREE_CHAIN (arg_type)); if (raises) ftype = build_exception_variant (ftype, raises); /* Declare helper function; as in: double helper_1(struct block_1 *ii, int z); */ typelist = TYPE_ARG_TYPES (ftype); /* (struct block_1 *ii, int z, ...) 
*/ typelist = tree_cons (NULL_TREE, cur_block->block_arg_ptr_type, typelist); helper_function_type = build_function_type (TREE_TYPE (ftype), typelist); if (raises) helper_function_type = build_exception_variant (helper_function_type, raises); TREE_TYPE (cur_block->helper_func_decl) = helper_function_type; finish_function (4); pop_function_context (); /* Hum, would be nice if someone else did this for us. */ if (global_bindings_p ()) cgraph_finalize_function (cur_block->helper_func_decl, false); pop_lang_context (); /* Build the declaration for copy_helper_block and destroy_helper_block helper functions for later use. */ if (cur_block->BlockHasCopyDispose) { tree s_ftype; push_lang_context (lang_name_c); /* void copy_helper_block (struct block*, struct block *); */ s_ftype = build_function_type (void_type_node, tree_cons (NULL_TREE, cur_block->block_arg_ptr_type, tree_cons (NULL_TREE, cur_block->block_arg_ptr_type, void_list_node))); sprintf (name, "__copy_helper_block_%d", unique_count); cur_block->copy_helper_func_decl = build_helper_func_decl (get_identifier (name), s_ftype); DECL_CONTEXT (cur_block->copy_helper_func_decl) = current_function_decl; synth_copy_helper_block_func (cur_block); /* LLVM LOCAL begin Copy helper function should not have source location. */ DECL_SOURCE_FILE (cur_block->copy_helper_func_decl) = NULL; DECL_SOURCE_LINE (cur_block->copy_helper_func_decl) = 0; /* LLVM LOCAL end Copy helper function should not have source location. */ /* void destroy_helper_block (struct block*); */ s_ftype = build_function_type (void_type_node, tree_cons (NULL_TREE, cur_block->block_arg_ptr_type, void_list_node)); sprintf (name, "__destroy_helper_block_%d", unique_count); cur_block->destroy_helper_func_decl = build_helper_func_decl (get_identifier (name), s_ftype); DECL_CONTEXT (cur_block->destroy_helper_func_decl) = current_function_decl; synth_destroy_helper_block_func (cur_block); /* LLVM LOCAL begin Destroy helper function should not have source location. 
*/
  DECL_SOURCE_FILE (cur_block->destroy_helper_func_decl) = NULL;
  DECL_SOURCE_LINE (cur_block->destroy_helper_func_decl) = 0;
  /* LLVM LOCAL end Destroy helper function should not have source location.  */
      pop_lang_context ();
    }

  block_impl = finish_block (block);

  /* Build unique name of the temporary used in code gen.  */
  sprintf (name, "__block_holder_tmp_%d", unique_count);
  tmp = build_block_literal_tmp (name, block_impl);
  tmp = build_fold_addr_expr (tmp);
  type = build_block_pointer_type (ftype);
  /* The value of the block expression is the address of the literal
     temporary, converted to the block-pointer type.  */
  expr = convert (type, convert (ptr_type_node, tmp));
  free (block_impl);
  return expr;
}
/* APPLE LOCAL end blocks 6040305 (ce) */
/* APPLE LOCAL begin blocks 6040305 (ch) */

/* build_byref_local_var_access - converts EXPR to:
   EXPR.__forwarding-><decl-name>.
   A __block variable is always accessed through its __forwarding pointer
   (the variable may live on the heap), so build that indirection here.  */
tree
build_byref_local_var_access (tree expr, tree decl_name)
{
  tree exp = build_component_ref (expr, get_identifier ("__forwarding"));
  exp = build_indirect_ref (exp, "unary *");
  exp = build_component_ref (exp, decl_name);
  return exp;
}

/* Accessor for the value slot of a cxx_binding.  */
#define BINDING_VALUE(b) ((b)->value)

/** build_block_byref_decl - This routine inserts a variable declared as a
    'byref' variable using the |...| syntax in helper function's outer-most
    scope.  NAME is the variable's identifier, DECL its existing declaration,
    EXP is forwarded on recursive insertion into enclosing blocks.
    Returns the newly built VAR_DECL.  */
tree
build_block_byref_decl (tree name, tree decl, tree exp)
{
  tree ptr_type, byref_decl;
  /* APPLE LOCAL begin radar 6225809 */
  if (cur_block->prev_block_info)
    {
      /* Traverse enclosing blocks.  Insert a __block variable in each
	 enclosing block which has no declaration of this variable.  This
	 is to ensure that the current (inner) block gets the __block
	 version of the variable; */
      struct block_sema_info *cb = cur_block->prev_block_info;
      while (cb)
	{
	  struct cxx_binding *b = I_SYMBOL_BINDING (name);
	  gcc_assert (b);
	  gcc_assert (BINDING_VALUE (b));
	  gcc_assert (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
		      || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL);
	  /* Find the first declaration not in current block.  */
	  while (b && BINDING_VALUE (b)
		 && (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
		     || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL)
		 && DECL_CONTEXT (BINDING_VALUE (b)) == cur_block->helper_func_decl)
	    {
	      /* FIXME: This can't happen?!  */
	      abort ();
	      /* b = b->previous; */
	    }
	  gcc_assert (b);
	  gcc_assert (BINDING_VALUE (b));
	  gcc_assert (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
		      || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL);
	  /* Is the next declaration not in the enclosing block?  */
	  if (b && BINDING_VALUE (b)
	      && (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
		  || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL)
	      && DECL_CONTEXT (BINDING_VALUE (b)) != cb->helper_func_decl)
	    {
	      /* No declaration of variable seen in the block.  Must insert
		 one.  Temporarily switch all parsing state to the enclosing
		 block's helper function, recurse, then restore.  */
	      /* FIXME: does this push enough?  scope?  */
	      struct cp_binding_level *save_scope = current_binding_level;
	      struct block_sema_info *save_current_block = cur_block;
	      tree save_current_function_decl = current_function_decl;
	      current_binding_level = cb->cp_the_scope;
	      cur_block = cb;
	      current_function_decl = cb->helper_func_decl;
	      decl = build_block_byref_decl (name, decl, exp);
	      cur_block = save_current_block;
	      current_binding_level = save_scope;
	      current_function_decl = save_current_function_decl;
	    }
	  cb = cb->prev_block_info;
	}
    }
  /* APPLE LOCAL end radar 6225809 */

  /* If it is already a byref declaration, do not add the pointer type
     because such declarations already have the pointer type
     added.  This happens when we have two nested byref declarations in
     nested blocks.  */
  ptr_type = (TREE_CODE (decl) == VAR_DECL && BLOCK_DECL_BYREF (decl))
	     ? TREE_TYPE (decl) : build_pointer_type (TREE_TYPE (decl));
  byref_decl = build_decl (VAR_DECL, name, ptr_type);
  DECL_CONTEXT (byref_decl) = current_function_decl;
  BLOCK_DECL_BYREF (byref_decl) = 1;

  /* Propagate the copy/dispose attributes from the original decl.  */
  if (TREE_CODE (decl) == VAR_DECL && COPYABLE_BYREF_LOCAL_VAR (decl))
    {
      COPYABLE_BYREF_LOCAL_VAR (byref_decl) = 1;
      COPYABLE_BYREF_LOCAL_NONPOD (byref_decl) = COPYABLE_BYREF_LOCAL_NONPOD (decl);
      /* APPLE LOCAL radar 5847976 */
      COPYABLE_WEAK_BLOCK (byref_decl) = COPYABLE_WEAK_BLOCK (decl);
    }

  /* Current scope must be that of the main function body.  */
  /* FIXME gcc_assert (current_scope->function_body);*/
  /* LLVM LOCAL begin 7387470 */
  /* Find the scope for function body (outer-most scope) and insert this
     variable in that scope.  This is to avoid duplicate declaration of the
     save variable.  */
  {
    struct cp_binding_level *b = current_binding_level;
    while (b->level_chain->kind != sk_function_parms)
      b = b->level_chain;
    pushdecl_with_scope (byref_decl, b, /*is_friend=*/false);
  }
  /* LLVM LOCAL end 7387470 */
  mark_used (byref_decl);
  /* APPLE LOCAL begin radar 6083129 - byref escapes (cp) */
  /* FIXME: finish this off, ensure the decl is scoped appropriately
     for when we want the cleanup to run.  */
  if (! flag_objc_gc_only)
    push_cleanup (byref_decl, build_block_byref_release_exp (byref_decl),
		  false);
  /* APPLE LOCAL end radar 6083129 - byref escapes (cp) */
  cur_block->block_byref_decl_list =
    tree_cons (NULL_TREE, byref_decl, cur_block->block_byref_decl_list);
  /* APPLE LOCAL radar 5847213 */
  /* build of block_original_byref_decl_list us removed.  */
  /* APPLE LOCAL begin radar 6144664 */
  DECL_SOURCE_LOCATION (byref_decl)
    = DECL_SOURCE_LOCATION (cur_block->helper_func_decl);
  /* APPLE LOCAL end radar 6144664 */
  return byref_decl;
}

/** build_block_ref_decl - This routine inserts a copied-in variable (a
    variable referenced in the block but whose scope is outside the block)
    in helper function's outer-most scope.  It also sets its type to 'const'
    as such variables are read-only.
*/
tree
build_block_ref_decl (tree name, tree decl)
{
  /* FIXME - Broken, should be found via objc runtime testcases.  */
  /* FIXME - Don't use DECL_CONTEXT on any helpers  */
  tree ref_decl;
  /* APPLE LOCAL radar 6212722 */
  tree type, exp;
  /* 'decl' was previously declared as __block.  Simply, copy the value
     embedded in the above variable.  */
  if (TREE_CODE (decl) == VAR_DECL && COPYABLE_BYREF_LOCAL_VAR (decl))
    decl = build_byref_local_var_access (decl, DECL_NAME (decl));
  else
    {
      if (cur_block->prev_block_info)
	{
	  /* Traverse enclosing blocks.  Insert a copied-in variable in
	     each enclosing block which has no declaration of this
	     variable.  This is to ensure that the current (inner) block
	     has the 'frozen' value of the copied-in variable; which means
	     the value of the copied in variable is at the point of the
	     block declaration and *not* when the inner block is
	     invoked.  */
	  struct block_sema_info *cb = cur_block->prev_block_info;
	  while (cb)
	    {
	      struct cxx_binding *b = I_SYMBOL_BINDING (name);
	      gcc_assert (b);
	      gcc_assert (BINDING_VALUE (b));
	      gcc_assert (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
			  || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL);
	      /* Find the first declaration not in current block.  */
	      while (b && BINDING_VALUE (b)
		     && (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
			 || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL)
		     && DECL_CONTEXT (BINDING_VALUE (b)) == cur_block->helper_func_decl)
		{
		  /* FIXME: This can't happen?!  */
		  abort ();
		  /* b = b->previous; */
		}
	      gcc_assert (b);
	      gcc_assert (BINDING_VALUE (b));
	      gcc_assert (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
			  || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL);
	      /* Is the next declaration not in the enclosing block?  */
	      if (b && BINDING_VALUE (b)
		  && (TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
		      || TREE_CODE (BINDING_VALUE (b)) == PARM_DECL)
		  && DECL_CONTEXT (BINDING_VALUE (b)) != cb->helper_func_decl)
		{
		  /* No declaration of variable seen in the block.  Must
		     insert one, so it 'freezes' the variable in this
		     block.  Temporarily switch parser state to the
		     enclosing block's helper function, recurse, then
		     restore.  */
		  /* FIXME: does this push enough?  scope?
		  */
		  struct cp_binding_level *save_scope = current_binding_level;
		  struct block_sema_info *save_current_block = cur_block;
		  tree save_current_function_decl = current_function_decl;
		  current_binding_level = cb->cp_the_scope;
		  cur_block = cb;
		  current_function_decl = cb->helper_func_decl;
		  decl = build_block_ref_decl (name, decl);
		  cur_block = save_current_block;
		  current_binding_level = save_scope;
		  current_function_decl = save_current_function_decl;
		}
	      cb = cb->prev_block_info;
	    }
	}
    }
  /* APPLE LOCAL begin radar 6212722 */
  /* Arrays and functions decay to pointers before being copied in.  */
  exp = decl;
  type = TREE_TYPE (exp);
  if (TREE_CODE (type) == ARRAY_TYPE || TREE_CODE (type) == FUNCTION_TYPE)
    {
      exp = decay_conversion (exp);
      type = TREE_TYPE (exp);
    }
  ref_decl = build_decl (VAR_DECL, name,
			 build_qualified_type (type, TYPE_QUAL_CONST));
  /* APPLE LOCAL end radar 6212722 */
  /* APPLE LOCAL begin radar 6144664 */
  DECL_SOURCE_LOCATION (ref_decl)
    = DECL_SOURCE_LOCATION (cur_block->helper_func_decl);
  /* APPLE LOCAL end radar 6144664 */
  DECL_CONTEXT (ref_decl) = current_function_decl;
  DECL_INITIAL (ref_decl) = error_mark_node;
  c_apply_type_quals_to_decl (TYPE_QUAL_CONST, ref_decl);
  BLOCK_DECL_COPIED (ref_decl) = 1;

  /* Find the scope for function body (outer-most scope) and insert
     this variable in that scope.  This is to avoid duplicate
     declaration of the save variable.  */
  {
    struct cp_binding_level *b = current_binding_level;
    while (b->level_chain->kind != sk_function_parms)
      b = b->level_chain;
    pushdecl_with_scope (ref_decl, b, /*is_friend=*/false);
    /* APPLE LOCAL radar 6169527 */
    add_decl_expr (ref_decl);
  }
  cur_block->block_ref_decl_list =
    tree_cons (NULL_TREE, ref_decl, cur_block->block_ref_decl_list);
  cur_block->block_original_ref_decl_list =
    /* APPLE LOCAL radar 6212722 */
    tree_cons (NULL_TREE, exp, cur_block->block_original_ref_decl_list);
  return ref_decl;
}

/* APPLE LOCAL begin radar 5847213 - radar 6329245 */
/* Cached pointer types for the two descriptor layouts, built lazily by
   build_block_descriptor_type below and kept alive across GC.  */
static GTY (()) tree descriptor_ptr_type;
static GTY (()) tree descriptor_ptr_type_with_copydispose;
/** build_block_descriptor_type - This routine builds following internal type:
 struct __block_descriptor {
 unsigned long int reserved;     // NULL
 unsigned long int Size;  // sizeof(struct Block_literal_1)

 // optional helper functions
 void *CopyFuncPtr;  // When BLOCK_HAS_COPY_DISPOSE is set (withCopyDispose true)
 void *DestroyFuncPtr; // When BLOCK_HAS_COPY_DISPOSE is set (withCopyDispose true)
// APPLE LOCAL begin radar 8143947
 const char *signature;   // the block signature
 const char *layout;      // reserved
// APPLE LOCAL end radar 8143947
} *descriptor_ptr_type;

   Objects of this type will always be static.  This is one main component
   of abi change.
*/
tree
build_block_descriptor_type (bool withCopyDispose)
{
  tree field_decl_chain = NULL_TREE, field_decl;
  tree main_type;

  /* Return the cached type if it was already built for this layout.  */
  if (withCopyDispose && descriptor_ptr_type_with_copydispose)
    return descriptor_ptr_type_with_copydispose;
  if (!withCopyDispose && descriptor_ptr_type)
    return descriptor_ptr_type;

  main_type = make_aggr_type (RECORD_TYPE);
  xref_basetypes (main_type, NULL_TREE);

  /* Fields are chained in reverse; finish_builtin_struct takes the chain.  */
  /* unsigned long int reserved; */
  field_decl = build_decl (FIELD_DECL, get_identifier ("reserved"),
			   long_unsigned_type_node);
  TREE_CHAIN (field_decl) = field_decl_chain;
  field_decl_chain = field_decl;

  /* unsigned long int Size; */
  field_decl = build_decl (FIELD_DECL, get_identifier ("Size"),
			   long_unsigned_type_node);
  TREE_CHAIN (field_decl) = field_decl_chain;
  field_decl_chain = field_decl;

  if (withCopyDispose)
    {
      /* void *CopyFuncPtr; */
      field_decl = build_decl (FIELD_DECL, get_identifier ("CopyFuncPtr"),
			       ptr_type_node);
      TREE_CHAIN (field_decl) = field_decl_chain;
      field_decl_chain = field_decl;
      /* void *DestroyFuncPtr; */
      field_decl = build_decl (FIELD_DECL, get_identifier ("DestroyFuncPtr"),
			       ptr_type_node);
      TREE_CHAIN (field_decl) = field_decl_chain;
      field_decl_chain = field_decl;
    }

  /* APPLE LOCAL begin radar 8143947 */
  /* char * signature */
  field_decl = build_decl (FIELD_DECL, get_identifier ("signature"),
			   build_pointer_type (char_type_node));
  TREE_CHAIN (field_decl) = field_decl_chain;
  field_decl_chain = field_decl;

  /* char * layout */
  field_decl = build_decl (FIELD_DECL, get_identifier ("layout"),
			   build_pointer_type (char_type_node));
  TREE_CHAIN (field_decl) = field_decl_chain;
  field_decl_chain = field_decl;
  /* APPLE LOCAL end radar 8143947 */

  /* Mark this struct as being a block struct rather than a 'normal'
     struct.  */
  TYPE_BLOCK_IMPL_STRUCT (main_type) = 1;
  if (withCopyDispose)
    finish_builtin_struct (main_type, "__block_descriptor_withcopydispose",
			   field_decl_chain, NULL_TREE);
  else
    finish_builtin_struct (main_type, "__block_descriptor",
			   field_decl_chain, NULL_TREE);
  CLASSTYPE_AS_BASE (main_type) = main_type;

  /* Cache the pointer type so subsequent calls reuse it.  */
  main_type = build_pointer_type (main_type);
  if (withCopyDispose)
    descriptor_ptr_type_with_copydispose = main_type;
  else
    descriptor_ptr_type = main_type;
  return main_type;
}
/* APPLE LOCAL end radar 5847213 - radar 6329245 */

/* Build a cdk_block_pointer declarator (a '^' declarator) wrapping TARGET,
   carrying ATTRIBUTES and cv-qualifiers QUALS.  */
cp_declarator *
make_block_pointer_declarator (tree attributes, cp_cv_quals quals,
			       cp_declarator *target)
{
  struct cp_declarator *itarget = target;
  struct cp_declarator *ret = make_declarator (cdk_block_pointer);
  /* APPLE LOCAL radar 5847213 */
  /* code removed */
  ret->attributes = attributes;
  ret->declarator = itarget;
  ret->u.block_pointer.qualifiers = quals;
  return ret;
}

/* This routine returns 'true' if 'name' has a declaration inside the
   current block, 'false' otherwise.  If 'name' has no declaration in the
   current block, it returns in DECL the user declaration for 'name' found
   in the enclosing scope.  Note that if it is declared in current
   declaration, it can be either a user declaration or a byref/copied-in
   declaration added in current block's scope by the compiler.  */
bool
lookup_name_in_block (tree name, tree *decl)
{
  /* FIXME - Broken, should be found via objc runtime testcases.  */
  /* FIXME - Don't use DECL_CONTEXT on any helpers  */
  cxx_binding *b = I_SYMBOL_BINDING (name);
  if (b && b->declared_in_block
      && DECL_CONTEXT (BINDING_VALUE (b)) == current_function_decl)
    return true;

  /* Check for variables only, as we may have parameters, such as 'self' */
  /* Note that if a copied-in variable (BLOCK_DECL_COPIED) in the
     enclosing block is found, it must be returned as this is where the
     variable in current (nested block) will have to get its value.  */
  while (b && TREE_CODE (BINDING_VALUE (b)) == VAR_DECL
	 && (BLOCK_DECL_BYREF (BINDING_VALUE (b))))
    b = b->previous;
  if (b)
    *decl = BINDING_VALUE (b);
  return false;
}

/** build_helper_func_decl - This routine builds a FUNCTION_DECL for
    a block helper function.  */
tree
build_helper_func_decl (tree ident, tree type)
{
  tree func_decl = build_decl (FUNCTION_DECL, ident, type);
  DECL_EXTERNAL (func_decl) = 0;
  TREE_PUBLIC (func_decl) = 0;
  TREE_USED (func_decl) = 1;
  TREE_NOTHROW (func_decl) = 0;
  /* APPLE LOCAL radar 6172148 */
  BLOCK_SYNTHESIZED_FUNC (func_decl) = 1;
  retrofit_lang_decl (func_decl);
  if (current_function_decl)
    DECL_NO_STATIC_CHAIN (current_function_decl) = 0;
  return func_decl;
}

/** declare_block_prologue_local_vars - utility routine to do the actual
    declaration and initialization for each referenced block variable.  */
/* APPLE LOCAL begin radar 6169527 */
/* This routine is mostly rewritten for c++ because initialization of
   variables may involve copy construction.  */
static void
declare_block_prologue_local_vars (tree self_parm, tree component,
				   tree stmt)
{
  tree decl, block_component;
  tree_stmt_iterator i;
  tree initialization_stmt;
  /* APPLE LOCAL radar 6163705 */
  int save_line = LOCATION_LINE (input_location);

  decl = component;
  /* Build .block_descriptor-><name>, the field holding the copied-in
     value.  */
  block_component = build_component_ref (build_indirect_ref (self_parm, "->"),
					 DECL_NAME (component));
  gcc_assert (block_component);
  /* APPLE LOCAL radar 6163705 */
  /* Place the prologue one line before the function start so it can be
     identified later (see the byref variant below).  */
  LOCATION_LINE (input_location) = DECL_SOURCE_LINE (decl) - 1;
  DECL_EXTERNAL (decl) = 0;
  TREE_STATIC (decl) = 0;
  TREE_USED (decl) = 1;
  DECL_CONTEXT (decl) = current_function_decl;
  DECL_ARTIFICIAL (decl) = 1;
  /* Capture the (possibly copy-constructing) initialization in its own
     statement list.  */
  initialization_stmt = push_stmt_list();
  cp_finish_decl (decl, block_component, 0, 0, LOOKUP_ONLYCONVERTING);
  initialization_stmt = pop_stmt_list (initialization_stmt);
  /* APPLE LOCAL radar 6163705 */
  LOCATION_LINE (input_location) = save_line;
  /* Prepend an initialization_stmt statement to the statement list.  */
  i = tsi_start (stmt);
  tsi_link_before (&i, initialization_stmt, TSI_SAME_STMT);
}

/** declare_block_prologue_local_byref_vars - utility routine to do the
    actual declaration and initialization for each __block referenced block
    variable.  */
static void
declare_block_prologue_local_byref_vars (tree self_parm, tree component,
					 tree stmt)
{
  tree decl, block_component;
  tree_stmt_iterator i;
  tree decl_stmt;

  decl = component;
  block_component = build_component_ref (build_indirect_ref (self_parm, "->"),
					 DECL_NAME (component));
  gcc_assert (block_component);
  DECL_EXTERNAL (decl) = 0;
  TREE_STATIC (decl) = 0;
  TREE_USED (decl) = 1;
  DECL_CONTEXT (decl) = current_function_decl;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_INITIAL (decl) = block_component;
  /* Prepend a DECL_EXPR statement to the statement list.  */
  i = tsi_start (stmt);
  decl_stmt = build_stmt (DECL_EXPR, decl);
  SET_EXPR_LOCATION (decl_stmt, DECL_SOURCE_LOCATION (decl));
  /* APPLE LOCAL begin radar 6163705, Blocks prologues  */
  /* Give the prologue statements a line number of one before the beginning
     of the function, to make them easily identifiable later.  */
  EXPR_LINENO (decl_stmt) = DECL_SOURCE_LINE (decl) - 1;
  /* APPLE LOCAL end radar 6163705, Blocks prologues  */
  decl_stmt = build3 (BIND_EXPR, void_type_node, decl, decl_stmt, NULL);
  TREE_SIDE_EFFECTS (decl_stmt) = 1;
  tsi_link_before (&i, decl_stmt, TSI_SAME_STMT);
}
/* APPLE LOCAL end radar 6169527 */

/** block_build_prologue - This routine builds the declarations for the
    variables referenced in the block; as in:
    int *y = .block_descriptor->y;
    int x = .block_descriptor->x;

    The decl_expr declaration for each initialization is enterred at the
    beginning of the helper function's statement-list which is passed
    in block_impl->block_body.
*/
void
block_build_prologue (struct block_sema_info *block_impl)
{
  tree chain;
  /* The hidden first parameter of every block helper function.  */
  tree self_parm = lookup_name (get_identifier (".block_descriptor"));
  gcc_assert (self_parm);

  /* Copied-in variables first, then __block (byref) variables.  */
  for (chain = block_impl->block_ref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    declare_block_prologue_local_vars (self_parm, TREE_VALUE (chain),
				       block_impl->block_body);

  /* APPLE LOCAL begin radar 6169527 */
  for (chain = block_impl->block_byref_decl_list; chain;
       chain = TREE_CHAIN (chain))
    declare_block_prologue_local_byref_vars (self_parm, TREE_VALUE (chain),
					     block_impl->block_body);
  /* APPLE LOCAL end radar 6169527 */
}
/* APPLE LOCAL end blocks 6040305 (ch) */

/* OpenMP 2.5 parsing routines.  */

/* All OpenMP clauses.  OpenMP 2.5.  */
typedef enum pragma_omp_clause {
  PRAGMA_OMP_CLAUSE_NONE = 0,

  PRAGMA_OMP_CLAUSE_COPYIN,
  PRAGMA_OMP_CLAUSE_COPYPRIVATE,
  PRAGMA_OMP_CLAUSE_DEFAULT,
  PRAGMA_OMP_CLAUSE_FIRSTPRIVATE,
  PRAGMA_OMP_CLAUSE_IF,
  PRAGMA_OMP_CLAUSE_LASTPRIVATE,
  PRAGMA_OMP_CLAUSE_NOWAIT,
  PRAGMA_OMP_CLAUSE_NUM_THREADS,
  PRAGMA_OMP_CLAUSE_ORDERED,
  PRAGMA_OMP_CLAUSE_PRIVATE,
  PRAGMA_OMP_CLAUSE_REDUCTION,
  PRAGMA_OMP_CLAUSE_SCHEDULE,
  PRAGMA_OMP_CLAUSE_SHARED
} pragma_omp_clause;

/* Returns name of the next clause.
   If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
   the token is not consumed.  Otherwise appropriate pragma_omp_clause is
   returned and the token is consumed.
*/
static pragma_omp_clause
cp_parser_omp_clause_name (cp_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* 'if', 'default' and 'private' are keywords, not CPP_NAMEs.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE))
    result = PRAGMA_OMP_CLAUSE_PRIVATE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      /* Dispatch on the first character, then confirm with strcmp.  */
      switch (p[0])
	{
	case 'c':
	  if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  break;
	case 'f':
	  if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  break;
	case 'n':
	  if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  break;
	}
    }

  if (result != PRAGMA_OMP_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);

  return result;
}

/* Validate that a clause of the given type does not already exist.  */

static void
check_no_duplicate_clause (tree clauses, enum tree_code code, const char *name)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == code)
      {
	error ("too many %qs clauses", name);
	break;
      }
}

/* OpenMP 2.5:
   variable-list:
     identifier
     variable-list , identifier

   In addition, we match a closing parenthesis.  An opening parenthesis
   will have been consumed by the caller.

   If KIND is nonzero, create the appropriate node and install the decl
   in OMP_CLAUSE_DECL and add the node to the head of the list.

   If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
   return the list created.  */

static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
				tree list)
{
  while (1)
    {
      tree name, decl;

      name = cp_parser_id_expression (parser, /*template_p=*/false,
				      /*check_dependency_p=*/true,
				      /*template_p=*/NULL,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
      if (name == error_mark_node)
	goto skip_comma;

      decl = cp_parser_lookup_name_simple (parser, name);
      if (decl == error_mark_node)
	cp_parser_name_lookup_error (parser, name, decl, NULL);
      else if (kind != 0)
	{
	  /* Prepend an OMP_CLAUSE node of the requested kind.  */
	  tree u = build_omp_clause (kind);
	  OMP_CLAUSE_DECL (u) = decl;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	list = tree_cons (decl, NULL_TREE, list);

    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    {
      int ending;

      /* Try to resync to an unnested comma.  Copied from
	 cp_parser_parenthesized_expression_list.  */
    skip_comma:
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
    }

  return list;
}

/* Similarly, but expect leading and trailing parenthesis.  This is a very
   common case for omp clauses.
*/
static tree
cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list)
{
  if (cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return cp_parser_omp_var_list_no_open (parser, kind, list);
  return list;
}

/* OpenMP 2.5:
   default ( shared | none ) */

static tree
cp_parser_omp_clause_default (cp_parser *parser, tree list)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;

	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;

	default:
	  goto invalid_kind;
	}

      cp_lexer_consume_token (parser->lexer);
    }
  else
    {
    invalid_kind:
      cp_parser_error (parser, "expected %<none%> or %<shared%>");
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  /* KIND stays UNSPECIFIED on a parse error; emit no clause then.  */
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");
  c = build_omp_clause (OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;

  return c;
}

/* OpenMP 2.5:
   if ( expression ) */

static tree
cp_parser_omp_clause_if (cp_parser *parser, tree list)
{
  tree t, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;

  t = cp_parser_condition (parser);

  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if");

  c = build_omp_clause (OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 2.5:
   nowait */

static tree
cp_parser_omp_clause_nowait (cp_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree c;

  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  c = build_omp_clause (OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   num_threads ( expression ) */

static tree
cp_parser_omp_clause_num_threads (cp_parser *parser, tree list)
{
  tree t, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;

  t = cp_parser_expression (parser, false);

  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

  c = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;

  return c;
}

/* OpenMP 2.5:
   ordered */

static tree
cp_parser_omp_clause_ordered (cp_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  tree c;

  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");

  c = build_omp_clause (OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}

/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && || */

static tree
cp_parser_omp_clause_reduction (cp_parser *parser, tree list)
{
  enum tree_code code;
  tree nlist, c;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return list;

  /* Map the operator token to the tree code recorded on the clause.  */
  switch (cp_lexer_peek_token (parser->lexer)->type)
    {
    case CPP_PLUS:
      code = PLUS_EXPR;
      break;
    case CPP_MULT:
      code = MULT_EXPR;
      break;
    case CPP_MINUS:
      code = MINUS_EXPR;
      break;
    case CPP_AND:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR:
      code = BIT_IOR_EXPR;
      break;
    case CPP_AND_AND:
      code = TRUTH_ANDIF_EXPR;
      break;
    case CPP_OR_OR:
      code = TRUTH_ORIF_EXPR;
      break;
    default:
      cp_parser_error (parser, "`+', `*', `-', `&', `^', `|', `&&', or `||'");
    resync_fail:
cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_COLON, "`:'")) goto resync_fail; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list); for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_REDUCTION_CODE (c) = code; return nlist; } /* OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , expression ) schedule-kind: static | dynamic | guided | runtime */ static tree cp_parser_omp_clause_schedule (cp_parser *parser, tree list) { tree c, t; if (!cp_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>")) return list; c = build_omp_clause (OMP_CLAUSE_SCHEDULE); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'd': if (strcmp ("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp ("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp ("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC; else goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_lexer_consume_token (parser->lexer); t = cp_parser_assignment_expression (parser, false); if (t == error_mark_node) goto resync_fail; else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error ("schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'")) goto 
resync_fail; } else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`,' or `)'")) goto resync_fail; check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule"); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: cp_parser_error (parser, "invalid schedule kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* Parse all OpenMP clauses. The set clauses allowed by the directive is a bitmask in MASK. Return the list of clauses found; the result of clause default goes in *pdefault. */ static tree cp_parser_omp_all_clauses (cp_parser *parser, unsigned int mask, const char *where, cp_token *pragma_tok) { tree clauses = NULL; while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) { pragma_omp_clause c_kind = cp_parser_omp_clause_name (parser); const char *c_name; tree prev = clauses; switch (c_kind) { case PRAGMA_OMP_CLAUSE_COPYIN: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = cp_parser_omp_clause_default (parser, clauses); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = cp_parser_omp_clause_if (parser, clauses); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = cp_parser_omp_clause_nowait (parser, clauses); c_name = "nowait"; break; case PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = cp_parser_omp_clause_num_threads (parser, clauses); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = 
	    cp_parser_omp_clause_ordered (parser, clauses);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE,
					    clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses = cp_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = cp_parser_omp_clause_schedule (parser, clauses);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED,
					    clauses);
	  c_name = "shared";
	  break;
	default:
	  cp_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}

      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error ("%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return finish_omp_clauses (clauses);
}

/* OpenMP 2.5:
   structured-block:
     statement

   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   cp_parser_statement calls add_stmt.  */

static unsigned
cp_parser_begin_omp_structured_block (cp_parser *parser)
{
  unsigned save = parser->in_statement;

  /* Only move the values to IN_OMP_BLOCK if they weren't false.
     This preserves the "not within loop or switch" style error messages
     for nonsense cases like
	void foo() {
	#pragma omp single
	  break;
	}
  */
  if (parser->in_statement)
    parser->in_statement = IN_OMP_BLOCK;

  return save;
}

static void
cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save)
{
  parser->in_statement = save;
}

/* Parse one statement as an OpenMP structured block, returning the
   finished statement tree.  */
static tree
cp_parser_omp_structured_block (cp_parser *parser)
{
  tree stmt = begin_omp_structured_block ();
  unsigned int save = cp_parser_begin_omp_structured_block (parser);

  cp_parser_statement (parser, NULL_TREE, false);

  cp_parser_end_omp_structured_block (parser, save);
  return finish_omp_structured_block (stmt);
}

/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt

   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x
   binop:
     +, *, -, /, &, ^, |, <<, >>

  where x is an lvalue expression with scalar type.  */

static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  tree lhs, rhs;
  enum tree_code code;

  cp_parser_require_pragma_eol (parser, pragma_tok);

  lhs = cp_parser_unary_expression (parser, /*address_p=*/false,
				    /*cast_p=*/false);
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;

    /* ++x / x++ and --x / x-- are normalized to x = x +/- 1.  */
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      code = PLUS_EXPR;
      rhs = integer_one_node;
      break;

    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      code = MINUS_EXPR;
      rhs = integer_one_node;
      break;

    default:
      /* Otherwise a compound-assignment operator must follow.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_MULT_EQ:
	  code = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  code = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  code = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  code = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  code = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  code = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  code = BIT_XOR_EXPR;
	  break;
	default:
	  cp_parser_error (parser,
			   "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      cp_lexer_consume_token (parser->lexer);

      rhs = cp_parser_expression (parser, false);
      if (rhs == error_mark_node)
	goto saw_error;
      break;
    }
  finish_omp_atomic (code, lhs, rhs);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

 saw_error:
  cp_parser_skip_to_end_of_block_or_statement (parser);
}

/* OpenMP 2.5:
   # pragma omp barrier new-line  */

static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}

/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block  */

static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, name = NULL;

  /* The region name is optional.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      name = cp_parser_identifier (parser);

      if (name == error_mark_node
	  || !cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      if (name == error_mark_node)
	name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (stmt, name);
}

/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list ) */

static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  /* The variable list is parsed and discarded; a flush affects all
     visible variables (the list tree is not used here).  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, 0, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}

/* Parse the restricted form of the for statement allowed by OpenMP.
*/

static tree
cp_parser_omp_for_loop (cp_parser *parser)
{
  tree init, cond, incr, body, decl, pre_body;
  location_t loc;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      cp_parser_error (parser, "for statement expected");
      return NULL;
    }
  loc = cp_lexer_consume_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, "`('"))
    return NULL;

  init = decl = NULL;
  pre_body = push_stmt_list ();
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_decl_specifier_seq type_specifiers;

      /* First, try to parse as an initialized declaration.  See
	 cp_parser_condition, from whence the bulk of this is copied.  */

      cp_parser_parse_tentatively (parser);
      cp_parser_type_specifier_seq (parser, /*is_condition=*/false,
				    &type_specifiers);
      if (!cp_parser_error_occurred (parser))
	{
	  tree asm_specification, attributes;
	  cp_declarator *declarator;

	  declarator = cp_parser_declarator (parser,
					     CP_PARSER_DECLARATOR_NAMED,
					     /*ctor_dtor_or_conv_p=*/NULL,
					     /*parenthesized_p=*/NULL,
					     /*member_p=*/false);
	  attributes = cp_parser_attributes_opt (parser);
	  asm_specification = cp_parser_asm_specification_opt (parser);
	  cp_parser_require (parser, CPP_EQ, "`='");
	  if (cp_parser_parse_definitely (parser))
	    {
	      tree pushed_scope;

	      decl = start_decl (declarator, &type_specifiers,
				 /*initialized_p=*/false, attributes,
				 /*prefix_attributes=*/NULL_TREE,
				 &pushed_scope);

	      init = cp_parser_assignment_expression (parser, false);

	      cp_finish_decl (decl, NULL_TREE, /*init_const_expr_p=*/false,
			      asm_specification, LOOKUP_ONLYCONVERTING);

	      if (pushed_scope)
		pop_scope (pushed_scope);
	    }
	}
      else
	cp_parser_abort_tentative_parse (parser);

      /* If parsing as an initialized declaration failed, try again as
	 a simple expression.  */
      if (decl == NULL)
	init = cp_parser_expression (parser, false);
    }
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");
  pre_body = pop_stmt_list (pre_body);

  cond = NULL;
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    cond = cp_parser_condition (parser);
  cp_parser_require (parser, CPP_SEMICOLON, "`;'");

  incr = NULL;
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    incr = cp_parser_expression (parser, false);

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, "`)'"))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);

  /* Note that we saved the original contents of this flag when we entered
     the structured block, and so we don't need to re-save it here.  */
  parser->in_statement = IN_OMP_FOR;

  /* Note that the grammar doesn't call for a structured block here,
     though the loop as a whole is a structured block.  */
  body = push_stmt_list ();
  cp_parser_statement (parser, NULL_TREE, false);
  body = pop_stmt_list (body);

  return finish_omp_for (loc, decl, init, cond, incr, body, pre_body);
}

/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop  */

#define OMP_FOR_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_ORDERED)		\
	| (1u << PRAGMA_OMP_CLAUSE_SCHEDULE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

/* Parse "#pragma omp for": collect the clauses, then parse the
   restricted for-loop inside its own structured block.  */

static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, sb, ret;
  unsigned int save;

  clauses = cp_parser_omp_all_clauses (parser, OMP_FOR_CLAUSE_MASK,
				       "#pragma omp for", pragma_tok);

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  ret = cp_parser_omp_for_loop (parser);
  if (ret)
    OMP_FOR_CLAUSES (ret) = clauses;

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}

/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block  */

static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_master (cp_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block  */

static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  return c_finish_omp_ordered (cp_parser_omp_structured_block (parser));
}

/* OpenMP 2.5:

   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block  */

static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  cp_token *tok;

  if (!cp_parser_require (parser, CPP_OPEN_BRACE, "`{'"))
    return NULL_TREE;

  stmt = push_stmt_list ();

  /* The grammar allows the first section-directive to be omitted;
     statements up to the first "#pragma omp section", closing brace,
     or EOF form an implicit first section.  */
  if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      unsigned save;

      substmt = begin_omp_structured_block ();
      save = cp_parser_begin_omp_structured_block (parser);

      while (1)
	{
	  cp_parser_statement (parser, NULL_TREE, false);

	  tok = cp_lexer_peek_token (parser->lexer);
	  if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	    break;
	  if (tok->type == CPP_CLOSE_BRACE)
	    break;
	  if (tok->type == CPP_EOF)
	    break;
	}

      cp_parser_end_omp_structured_block (parser, save);
      substmt = finish_omp_structured_block (substmt);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }

  /* Each remaining section must be introduced by its directive;
     error_suppress keeps us from emitting the same diagnostic for
     every statement of a malformed section.  */
  while (1)
    {
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_CLOSE_BRACE)
	break;
      if (tok->type == CPP_EOF)
	break;

      if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_require_pragma_eol (parser, tok);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }
  cp_parser_require (parser, CPP_CLOSE_BRACE, "`}'");

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  add_stmt (stmt);
  return stmt;
}

/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope  */

#define OMP_SECTIONS_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_LASTPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, ret;

  clauses = cp_parser_omp_all_clauses (parser, OMP_SECTIONS_CLAUSE_MASK,
				       "#pragma omp sections", pragma_tok);

  ret = cp_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;

  return ret;
}

/* OpenMP 2.5:
   # pragma omp parallel parallel-clause new-line
   # pragma omp parallel for parallel-for-clause new-line
   # pragma omp parallel sections parallel-sections-clause new-line  */

#define OMP_PARALLEL_CLAUSE_MASK			\
	( (1u << PRAGMA_OMP_CLAUSE_IF)			\
	| (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_DEFAULT)		\
	| (1u << PRAGMA_OMP_CLAUSE_SHARED)		\
	| (1u << PRAGMA_OMP_CLAUSE_COPYIN)		\
	| (1u << PRAGMA_OMP_CLAUSE_REDUCTION)		\
	| (1u << PRAGMA_OMP_CLAUSE_NUM_THREADS))

static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok)
{
  enum pragma_kind p_kind = PRAGMA_OMP_PARALLEL;
  const char *p_name = "#pragma omp parallel";
  tree stmt, clauses, par_clause, ws_clause, block;
  unsigned int mask = OMP_PARALLEL_CLAUSE_MASK;
  unsigned int save;

  /* Detect the combined "parallel for" and "parallel sections" forms
     and widen the accepted clause mask accordingly; nowait is not
     permitted on the combined constructs.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      cp_lexer_consume_token (parser->lexer);
      p_kind = PRAGMA_OMP_PARALLEL_FOR;
      p_name = "#pragma omp parallel for";
      mask |= OMP_FOR_CLAUSE_MASK;
      mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp (p, "sections") == 0)
	{
	  cp_lexer_consume_token (parser->lexer);
	  p_kind = PRAGMA_OMP_PARALLEL_SECTIONS;
	  p_name = "#pragma omp parallel sections";
	  mask |= OMP_SECTIONS_CLAUSE_MASK;
	  mask &= ~(1u << PRAGMA_OMP_CLAUSE_NOWAIT);
	}
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);
  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);

  switch (p_kind)
    {
    case PRAGMA_OMP_PARALLEL:
      cp_parser_already_scoped_statement (parser);
      par_clause = clauses;
      break;

    case PRAGMA_OMP_PARALLEL_FOR:
      /* For combined constructs the clauses are split between the
	 parallel directive and the work-sharing directive.  */
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = cp_parser_omp_for_loop (parser);
      if (stmt)
	OMP_FOR_CLAUSES (stmt) = ws_clause;
      break;

    case PRAGMA_OMP_PARALLEL_SECTIONS:
      c_split_parallel_clauses (clauses, &par_clause, &ws_clause);
      stmt = cp_parser_omp_sections_scope (parser);
      if (stmt)
	OMP_SECTIONS_CLAUSES (stmt) = ws_clause;
      break;

    default:
      gcc_unreachable ();
    }

  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_omp_parallel (par_clause, block);
  if (p_kind != PRAGMA_OMP_PARALLEL)
    OMP_PARALLEL_COMBINED (stmt) = 1;
  return stmt;
}

/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block  */

#define OMP_SINGLE_CLAUSE_MASK				\
	( (1u << PRAGMA_OMP_CLAUSE_PRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (1u << PRAGMA_OMP_CLAUSE_COPYPRIVATE)		\
	| (1u << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt = make_node (OMP_SINGLE);
  TREE_TYPE (stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				 "#pragma omp single", pragma_tok);
  OMP_SINGLE_BODY (stmt) = cp_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}

/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list) */

static void
cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok)
{
  tree vars;

  vars = cp_parser_omp_var_list (parser, 0, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);

  /* Thread-private variables require target TLS support.  */
  if (!targetm.have_tls)
    sorry ("threadprivate variables not supported in this target");

  finish_omp_threadprivate (vars);
}

/* Main entry point to OpenMP statement pragmas.  */

static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt;

  switch (pragma_tok->pragma_kind)
    {
    case PRAGMA_OMP_ATOMIC:
      cp_parser_omp_atomic (parser, pragma_tok);
      return;
    case PRAGMA_OMP_CRITICAL:
      stmt = cp_parser_omp_critical (parser, pragma_tok);
      break;
    case PRAGMA_OMP_FOR:
      stmt = cp_parser_omp_for (parser, pragma_tok);
      break;
    case PRAGMA_OMP_MASTER:
      stmt = cp_parser_omp_master (parser, pragma_tok);
      break;
    case PRAGMA_OMP_ORDERED:
      stmt = cp_parser_omp_ordered (parser, pragma_tok);
      break;
    case PRAGMA_OMP_PARALLEL:
      stmt = cp_parser_omp_parallel (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SECTIONS:
      stmt = cp_parser_omp_sections (parser, pragma_tok);
      break;
    case PRAGMA_OMP_SINGLE:
      stmt = cp_parser_omp_single (parser, pragma_tok);
      break;
    default:
      gcc_unreachable ();
    }

  if (stmt)
    SET_EXPR_LOCATION (stmt, pragma_tok->location);
}

/* The parser.  */

static GTY (()) cp_parser *the_parser;

/* Special handling for the first token or line in the file.  The first
   thing in the file might be #pragma GCC pch_preprocess, which loads a
   PCH file, which is a GC collection point.  So we need to handle this
   first pragma without benefit of an existing lexer structure.

   Always returns one token to the caller in *FIRST_TOKEN.  This is
   either the true first token of the file, or the first token after
   the initial pragma.
*/ static void cp_parser_initial_pragma (cp_token *first_token) { tree name = NULL; cp_lexer_get_preprocessor_token (NULL, first_token); /* APPLE LOCAL begin 4137741 */ while (first_token->type == CPP_BINCL || first_token->type == CPP_EINCL) { if (first_token->type == CPP_BINCL) (*debug_hooks->start_source_file) (TREE_INT_CST_LOW (first_token->u.value), first_token->location.file); else (*debug_hooks->end_source_file) (TREE_INT_CST_LOW (first_token->u.value)); cp_lexer_get_preprocessor_token (NULL, first_token); } /* APPLE LOCAL end 4137741 */ if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS) return; cp_lexer_get_preprocessor_token (NULL, first_token); if (first_token->type == CPP_STRING) { name = first_token->u.value; cp_lexer_get_preprocessor_token (NULL, first_token); if (first_token->type != CPP_PRAGMA_EOL) error ("junk at end of %<#pragma GCC pch_preprocess%>"); } else error ("expected string literal"); /* Skip to the end of the pragma. */ while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF) cp_lexer_get_preprocessor_token (NULL, first_token); /* Now actually load the PCH file. */ if (name) c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name)); /* Read one more token to return to our caller. We have to do this after reading the PCH file in, since its pointers have to be live. */ cp_lexer_get_preprocessor_token (NULL, first_token); } /* Normal parsing of a pragma token. Here we can (and must) use the regular lexer. 
*/ static bool cp_parser_pragma (cp_parser *parser, enum pragma_context context) { cp_token *pragma_tok; unsigned int id; pragma_tok = cp_lexer_consume_token (parser->lexer); gcc_assert (pragma_tok->type == CPP_PRAGMA); parser->lexer->in_pragma = true; id = pragma_tok->pragma_kind; switch (id) { case PRAGMA_GCC_PCH_PREPROCESS: error ("%<#pragma GCC pch_preprocess%> must be first"); break; case PRAGMA_OMP_BARRIER: switch (context) { case pragma_compound: cp_parser_omp_barrier (parser, pragma_tok); return false; case pragma_stmt: error ("%<#pragma omp barrier%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_FLUSH: switch (context) { case pragma_compound: cp_parser_omp_flush (parser, pragma_tok); return false; case pragma_stmt: error ("%<#pragma omp flush%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_THREADPRIVATE: cp_parser_omp_threadprivate (parser, pragma_tok); return false; case PRAGMA_OMP_ATOMIC: case PRAGMA_OMP_CRITICAL: case PRAGMA_OMP_FOR: case PRAGMA_OMP_MASTER: case PRAGMA_OMP_ORDERED: case PRAGMA_OMP_PARALLEL: case PRAGMA_OMP_SECTIONS: case PRAGMA_OMP_SINGLE: if (context == pragma_external) goto bad_stmt; cp_parser_omp_construct (parser, pragma_tok); return true; case PRAGMA_OMP_SECTION: error ("%<#pragma omp section%> may only be used in " "%<#pragma omp sections%> construct"); break; default: gcc_assert (id >= PRAGMA_FIRST_EXTERNAL); c_invoke_pragma_handler (id); break; bad_stmt: cp_parser_error (parser, "expected declaration specifiers"); break; } cp_parser_skip_to_pragma_eol (parser, pragma_tok); return false; } /* The interface the pragma parsers have to the lexer. 
*/ enum cpp_ttype pragma_lex (tree *value) { cp_token *tok; enum cpp_ttype ret; tok = cp_lexer_peek_token (the_parser->lexer); ret = tok->type; *value = tok->u.value; if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF) ret = CPP_EOF; else if (ret == CPP_STRING) *value = cp_parser_string_literal (the_parser, false, false); else { cp_lexer_consume_token (the_parser->lexer); if (ret == CPP_KEYWORD) ret = CPP_NAME; } return ret; } /* External interface. */ /* Parse one entire translation unit. */ void c_parse_file (void) { bool error_occurred; static bool already_called = false; if (already_called) { sorry ("inter-module optimizations not implemented for C++"); return; } already_called = true; the_parser = cp_parser_new (); push_deferring_access_checks (flag_access_control ? dk_no_deferred : dk_no_check); error_occurred = cp_parser_translation_unit (the_parser); the_parser = NULL; /* APPLE LOCAL begin radar 4874613 */ /* Bad parse errors. Just forget about it. */ if (! global_bindings_p () || current_class_type || decl_namespace_list) return; if (pch_file) c_common_write_pch (); /* APPLE LOCAL end radar 4874613 */ } /* This variable must be provided by every front end. */ int yydebug; #include "gt-cp-parser.h"
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/gem.h" #include "magick/gem-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential colorspace of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ColorspaceType GetImageColorspaceType(const Image *image, ExceptionInfo *exception) { ColorspaceType colorspace; ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colorspace=image->colorspace; type=IdentifyImageType(image,exception); if ((type == BilevelType) || (type == GrayscaleType) || (type == GrayscaleMatteType)) colorspace=GRAYColorspace; return(colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R G B T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RGBTransformImage() converts the reference image from sRGB to an alternate % colorspace. The transformation matrices are not the standard ones: the % weights are rescaled to normalized the range of the transformed values to % be [0..QuantumRange]. % % The format of the RGBTransformImage method is: % % MagickBooleanType RGBTransformImage(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace to transform the image to. 
% */ static inline void ConvertRGBToCMY(const Quantum red,const Quantum green, const Quantum blue,double *cyan,double *magenta,double *yellow) { *cyan=QuantumScale*(QuantumRange-red); *magenta=QuantumScale*(QuantumRange-green); *yellow=QuantumScale*(QuantumRange-blue); } static void ConvertRGBToLab(const Quantum red,const Quantum green, const Quantum blue,double *L,double *a,double *b) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLab(X,Y,Z,L,a,b); } static inline void ConvertXYZToLMS(const double x,const double y, const double z,double *L,double *M,double *S) { *L=0.7328*x+0.4296*y-0.1624*z; *M=(-0.7036*x+1.6975*y+0.0061*z); *S=0.0030*x+0.0136*y+0.9834*z; } static void ConvertRGBToLMS(const Quantum red,const Quantum green, const Quantum blue,double *L,double *M,double *S) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLMS(X,Y,Z,L,M,S); } static void ConvertRGBToLuv(const Quantum red,const Quantum green, const Quantum blue,double *L,double *u,double *v) { double X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); ConvertXYZToLuv(X,Y,Z,L,u,v); } static void ConvertRGBToxyY(const Quantum red,const Quantum green, const Quantum blue,double *low_x,double *low_y,double *cap_Y) { double gamma, X, Y, Z; ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); gamma=PerceptibleReciprocal(X+Y+Z); *low_x=gamma*X; *low_y=gamma*Y; *cap_Y=Y; } static void ConvertRGBToYPbPr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *U,double *V) { 
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static void ConvertRGBToYDbDr(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *Db,double *Dr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5; *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5; } static void ConvertRGBToYIQ(const Quantum red,const Quantum green, const Quantum blue,double *Y,double *I,double *Q) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5; *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5; } MagickExport MagickBooleanType RGBTransformImage(Image *image, const ColorspaceType colorspace) { #define RGBTransformImageTag "RGBTransform/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); pixel.red=(MagickRealType) pixel.red; pixel.green=(MagickRealType) pixel.green; pixel.blue=(MagickRealType) pixel.blue; ConvertRGBToCMYK(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGray(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to HSI. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; red=ClampToQuantum((MagickRealType) GetPixelRed(q)); green=ClampToQuantum((MagickRealType) GetPixelGreen(q)); blue=ClampToQuantum((MagickRealType) GetPixelBlue(q)); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { 
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: { ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*X)); SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*Y)); SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*Z)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); 
blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,logmap[ScaleQuantumToMap(red)]); SetPixelGreen(q,logmap[ScaleQuantumToMap(green)]); SetPixelBlue(q,logmap[ScaleQuantumToMap(blue)]); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(DecodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. 
*/ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601LumaColorspace: { /* Initialize Rec601 luma tables: G = 0.298839*R+0.586811*G+0.114350*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (0.298839*(double) i); y_map[i].y=(MagickRealType) (0.586811*(double) i); z_map[i].y=(MagickRealType) (0.114350*(double) i); x_map[i].z=(MagickRealType) (0.298839*(double) i); y_map[i].z=(MagickRealType) (0.586811*(double) i); z_map[i].z=(MagickRealType) (0.114350*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709LumaColorspace: { /* Initialize Rec709 luma tables: G = 0.212656*R+0.715158*G+0.072186*B */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (0.212656*(double) i); y_map[i].y=(MagickRealType) (0.715158*(double) i); z_map[i].y=(MagickRealType) (0.072186*(double) i); x_map[i].z=(MagickRealType) (0.212656*(double) i); y_map[i].z=(MagickRealType) (0.715158*(double) i); z_map[i].z=(MagickRealType) (0.072186*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; y_map[i].x=0.010566*i; z_map[i].x=0.002052*i; x_map[i].y=(-0.003296)*i; y_map[i].y=(-0.006471)*i; z_map[i].y=0.009768*i; x_map[i].z=0.009410*i; y_map[i].z=(-0.007880)*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; register size_t blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ (MagickRealType) primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ (MagickRealType) primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ (MagickRealType) primary_info.z; SetPixelRed(q,ScaleMapToQuantum(pixel.red)); SetPixelGreen(q,ScaleMapToQuantum(pixel.green)); SetPixelBlue(q,ScaleMapToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,RGBTransformImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register size_t blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=ScaleMapToQuantum(pixel.red); image->colormap[i].green=ScaleMapToQuantum(pixel.green); image->colormap[i].blue=ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  ImageType
    canonical_type;

  MagickBooleanType
    sync_status;

  /*
    Tag the image with a new colorspace and reset the colorimetric metadata
    (gamma, rendering intent, chromaticity) to defaults appropriate for it.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* already in the requested colorspace */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  canonical_type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      /*
        Gray colorspaces force a grayscale image type.
      */
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.0;
      canonical_type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.0;  /* linear spaces carry unity gamma */
    else
      {
        /*
          Other colorspaces: assume sRGB-style primaries and white point.
        */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  sync_status=SyncImagePixelCache(image,&image->exception);
  image->type=canonical_type;
  return(sync_status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *option;

  ImageType
    scan_type;

  register const PixelPacket
    *pixel;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Nothing to do when the image is already typed as a gray variant.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  option=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel: start optimistic (bi-level), demote to grayscale when a
    non-monochrome gray pixel appears, and abort on the first non-gray pixel.
  */
  scan_type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    pixel=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (pixel == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(pixel) == MagickFalse)
        {
          scan_type=UndefinedType;
          break;
        }
      if ((scan_type == BilevelType) &&
          (IsMonochromePixel(pixel) == MagickFalse))
        scan_type=GrayscaleType;
      pixel++;
    }
    if (scan_type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (scan_type == UndefinedType)
    return(MagickFalse);
  /*
    All pixels are gray: retype the image and sync the pixel cache.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=scan_type;
  if ((scan_type == GrayscaleType) && (image->matte != MagickFalse))
    image->type=GrayscaleMatteType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n
o c h r o m e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *option;

  ImageType
    scan_type;

  register const PixelPacket
    *pixel;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already typed as bi-level */
  if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
      (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
    return(MagickFalse);
  option=GetImageProperty(image,"colorspace:auto-grayscale");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickFalse);
  /*
    Scan every pixel; abort as soon as one is not pure black or white.
  */
  scan_type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    pixel=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (pixel == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsMonochromePixel(pixel) == MagickFalse)
        {
          scan_type=UndefinedType;
          break;
        }
      pixel++;
    }
    if (scan_type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (scan_type == UndefinedType)
    return(MagickFalse);
  /*
    All pixels are monochrome: retype the image and sync the pixel cache.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=scan_type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    result;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(MagickTrue);  /* no conversion needed */
  /*
    Any embedded color profiles no longer describe the pixels once we
    transform them, so discard them up front.
  */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformRGBImage(image,image->colorspace));
  result=MagickTrue;
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    result=TransformRGBImage(image,image->colorspace);
  if (result == MagickFalse)
    return(result);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    result=MagickFalse;
  return(result);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m R G B I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.
The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the TransformRGBImage method is:
%
%      MagickBooleanType TransformRGBImage(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
*/

/*
  Convert normalized CMY components to RGB quanta: each channel is the
  complement (1.0-c), scaled to the quantum range and clamped.
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
  const double yellow,Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(1.0-cyan));
  *green=ClampToQuantum(QuantumRange*(1.0-magenta));
  *blue=ClampToQuantum(QuantumRange*(1.0-yellow));
}

/*
  Apply the 3x3 LMS-to-XYZ matrix (inverse of the forward LMS transform used
  elsewhere in this file).
*/
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
  double *X,double *Y,double *Z)
{
  *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
  *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
  *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}

/*
  LMS -> RGB via the XYZ intermediate space.
*/
static inline void ConvertLMSToRGB(const double L,const double M,
  const double S,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Luv -> RGB via XYZ.  The u/v inputs are normalized to [0,1]; the scale and
  offset constants undo that normalization before the XYZ conversion.
*/
static inline void ConvertLuvToRGB(const double L,const double u,
  const double v,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  Round a real value to the nearest integer, clamped to the YCC map domain
  [0,1388] (the YCCMap table defined below has 1389 entries).
*/
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  if (value <= 0.0)
    return(0);
  if (value >= 1388.0)
    return(1388);
  return((ssize_t) (value+0.5));
}

/*
  Lab -> RGB via XYZ.  The a/b inputs are normalized to [0,1] with 0.5 as the
  neutral axis; the 255.0*(c-0.5) terms restore the signed Lab range.
*/
static inline void ConvertLabToRGB(const double L,const double a,
  const double b,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    X,
    Y,
    Z;

  ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  xyY -> RGB via XYZ.  PerceptibleReciprocal() guards against a zero y
  chromaticity (avoids division by zero when recovering X and Z).
*/
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
  const double cap_Y,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    gamma,
    X,
    Y,
    Z;

  gamma=PerceptibleReciprocal(low_y);
  X=gamma*cap_Y*low_x;
  Y=cap_Y;
  Z=gamma*cap_Y*(1.0-low_x-low_y);
  ConvertXYZToRGB(X,Y,Z,red,green,blue);
}

/*
  YPbPr -> RGB.  Pb/Pr arrive normalized to [0,1] with 0.5 as the zero point;
  per the header note above, the weights are rescaled so results span
  [0..QuantumRange].
*/
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(0.99999999999914679361*Y-
    1.2188941887145875e-06*(Pb-0.5)+1.4019995886561440468*(Pr-0.5)));
  *green=ClampToQuantum(QuantumRange*(0.99999975910502514331*Y-
    0.34413567816504303521*(Pb-0.5)-0.71413649331646789076*(Pr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(1.00000124040004623180*Y+
    1.77200006607230409200*(Pb-0.5)+2.1453384174593273e-06*(Pr-0.5)));
}

/*
  YCbCr -> RGB: numerically identical to the YPbPr conversion in this file.
*/
static void ConvertYCbCrToRGB(const double Y,const double Cb,
  const double Cr,Quantum *red,Quantum *green,Quantum *blue)
{
  ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}

/*
  YDbDr -> RGB.  Db/Dr arrive normalized to [0,1] with 0.5 as the zero point.
*/
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
    0.52591263066186533*(Dr-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
    0.26789932820759876*(Dr-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
    7.9202543533108e-05*(Dr-0.5)));
}

/*
  YIQ -> RGB.  I/Q arrive normalized to [0,1] with 0.5 as the zero point.
*/
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y+0.9562957197589482261*(I-0.5)+
    0.6210244164652610754*(Q-0.5)));
  *green=ClampToQuantum(QuantumRange*(Y-0.2721220993185104464*(I-0.5)-
    0.6473805968256950427*(Q-0.5)));
  *blue=ClampToQuantum(QuantumRange*(Y-1.1069890167364901945*(I-0.5)+
    1.7046149983646481374*(Q-0.5)));
}

/*
  YUV -> RGB.  U/V arrive normalized to [0,1] with 0.5 as the zero point.
*/
static void ConvertYUVToRGB(const double Y,const double U,const double V,
  Quantum *red,Quantum *green,Quantum *blue)
{
  *red=ClampToQuantum(QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+
    1.1398279671717170825*(V-0.5)));
*green=ClampToQuantum(QuantumRange*(Y-0.3946101641414141437*(U-0.5)- 0.5805003156565656797*(V-0.5))); *blue=ClampToQuantum(QuantumRange*(Y+2.0319996843434342537*(U-0.5)- 4.813762626262513e-04*(V-0.5))); } MagickExport MagickBooleanType TransformRGBImage(Image *image, const ColorspaceType colorspace) { #define TransformRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 
0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 
0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 
0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 
0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 
0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 
0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000 }; CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug 
!= MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; exception=(&image->exception); switch (colorspace) { case CMYKColorspace: { MagickPixelPacket zero; /* Transform image from CMYK to sRGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } GetMagickPixelPacket(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); ConvertCMYKToRGB(&pixel); SetPixelPacket(image,&pixel,q,indexes+x); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { /* Transform linear RGB to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=(MagickRealType) GetPixelGray(q); if ((image->intensity == Rec601LuminancePixelIntensityMethod) || (image->intensity == Rec709LuminancePixelIntensityMethod)) gray=EncodePixelGamma(gray); SetPixelRed(q,ClampToQuantum(gray)); SetPixelGreen(q,ClampToQuantum(gray)); SetPixelBlue(q,ClampToQuantum(gray)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double X, Y, Z; Quantum blue, green, red; X=QuantumScale*GetPixelRed(q); Y=QuantumScale*GetPixelGreen(q); Z=QuantumScale*GetPixelBlue(q); switch (colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case XYZColorspace: { 
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=ClampToQuantum(QuantumRange*X); green=ClampToQuantum(QuantumRange*Y); blue=ClampToQuantum(QuantumRange*Z); break; } } SetPixelRed(q,ClampToQuantum((MagickRealType) red)); SetPixelGreen(q,ClampToQuantum((MagickRealType) green)); SetPixelBlue(q,ClampToQuantum((MagickRealType) blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma"); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma"); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black"); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white"); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { 
status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelRed(q))])); green=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelGreen(q))])); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) logmap[ScaleQuantumToMap(GetPixelBlue(q))])); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { Quantum blue, green, red; red=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelRed(q))); green=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelGreen(q))); blue=ClampToQuantum(EncodePixelGamma((MagickRealType) GetPixelBlue(q))); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == 
MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(1.0*(double) i); y_map[i].x=(0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(1.0*(double) i); y_map[i].y=(0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(1.0*(double) i); y_map[i].z=(-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*(double) i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) (0.0000000); z_map[i].x=(MagickRealType) (1.8215000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) ((-0.4302726)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) ((-0.9271435)*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*((double) i-(MagickRealType) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) (0.0000000); } break; } default: { /* Linear conversion tables. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket pixel; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(q)); green=ScaleQuantumToMap(GetPixelGreen(q)); blue=ScaleQuantumToMap(GetPixelBlue(q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } SetPixelRed(q,ClampToQuantum(pixel.red)); SetPixelGreen(q,ClampToQuantum(pixel.green)); SetPixelBlue(q,ClampToQuantum(pixel.blue)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert 
PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { MagickPixelPacket pixel; register size_t blue, green, red; red=ScaleQuantumToMap(image->colormap[i].red); green=ScaleQuantumToMap(image->colormap[i].green); blue=ScaleQuantumToMap(image->colormap[i].blue); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=ClampToQuantum(pixel.red); image->colormap[i].green=ClampToQuantum(pixel.green); image->colormap[i].blue=ClampToQuantum(pixel.blue); } (void) SyncImage(image); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace) == MagickFalse) return(MagickFalse); return(MagickTrue); }
GB_binop__iseq_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_int16) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_int16) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_int16) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int16) // A*D function (colscale): GB (_AxD__iseq_int16) // D*A function (rowscale): GB (_DxB__iseq_int16) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_int16) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int16) // C=scalar+B GB (_bind1st__iseq_int16) // C=scalar+B' GB (_bind1st_tran__iseq_int16) // C=A+scalar GB (_bind2nd__iseq_int16) // C=A'+scalar GB (_bind2nd_tran__iseq_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT16 || GxB_NO_ISEQ_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE(review): this is an auto-generated SuiteSparse:GraphBLAS "factory
// kernel" file for the ISEQ binary operator on int16 (z = (x == y)).
// Presumably produced by a code generator, so hand edits are normally lost on
// regeneration -- confirm before editing.  Each kernel pulls its inner loop in
// via #include of a shared template, which expects the local names (Cx, bwork,
// alpha_scalar, ...) defined immediately above the #include.

// C += A+B, all three matrices dense: the ISEQ operator has no accumulator
// variant, so this placeholder is compiled out.  The #if whose #endif appears
// just below is opened earlier in the file.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // B pre-sliced into tasks for the parallel template:
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes its results directly into C->x
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here is released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion pairs an entry present in only one input with a scalar
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (may be NULL; see GBB)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (may be NULL; see GBB)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition; kept for symmetry by the generator)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes the include-guard-style #if opened earlier in this file
#endif
Secciones.c
#include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #define TRUE 1 #define FALSE 0 #else #define omp_get_thread_num() 0 #endif void funcA(); void funcB(); int main() { #ifdef _OPENMP (void) omp_set_dynamic(FALSE); if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");} (void) omp_set_num_threads(4); #endif #pragma omp parallel { #pragma omp sections { #pragma omp section (void) funcA(); #pragma omp section (void) funcB(); } // Final del bloque de secciones } // Final de la region paralela return(0); } void funcA() { printf("En funcA: esta seccion es ejecutada por el hilo %d\n", omp_get_thread_num()); } void funcB() { printf("En funcB: esta seccion es ejecutada por el hilo %d\n", omp_get_thread_num()); }
TinyErode.h
// SPDX-License-Identifier: MIT
//  _______ _             ______              _
// |__   __(_)           |  ____|            | |
//    | |   _ _ __  _   _| |__   _ __ ___   __| | ___
//    | |  | | '_ \| | | |  __| | '__/ _ \ / _` |/ _ \
//    | |  | | | | | |_| | |____| | | (_) | (_| |  __/
//    |_|  |_|_| |_|\__, |______|_|  \___/ \__,_|\___|
//                   __/ |
//                  |___/
//
// Copyright (C) 2021 Taylor Holberton
//
// A C++ library for simulating erosion.

#pragma once

#ifndef TINYERODE_H_INCLUDED
#define TINYERODE_H_INCLUDED

#include <algorithm>
#include <array>
#include <numeric>
#include <vector>

#include <cassert>
#include <cmath>

namespace TinyErode {

/// Used for simulating a rainfall event on a terrain.
/// Stores information on the terrain that is required to simulate the effect of
/// hydraulic erosion.
///
/// @note The class should only be used once per rainfall event.
class Simulation final
{
public:
  /// Constructs the simulation grid; all per-cell state starts zeroed.
  ///
  /// @param w The width of the terrain, in cells.
  /// @param h The height of the terrain, in cells.
  Simulation(int w = 0, int h = 0);

  /// Sets the time delta applied on each iteration.
  void SetTimeStep(float timeStep) noexcept { mTimeStep = timeStep; }

  float GetTimeStep() const noexcept { return mTimeStep; }

  int GetWidth() const noexcept { return mSize[0]; }

  int GetHeight() const noexcept { return mSize[1]; }

  /// Called at the beginning of each iteration.
  /// This function will compute the flow rate of each cell, based on the level
  /// of water and the elevation at each point in the height map.
  ///
  /// @param height The function taking an x and y coordinate and returning the
  ///               height value at each point in the map.
  ///
  /// @param water The function taking an x and y coordinate and returning the
  ///              water level at each point in the map. The water levels can be
  ///              initialized to simulate rainfall or river streams.
  template<typename Height, typename Water>
  void ComputeFlowAndTilt(const Height& height, const Water& water);

  /// This function is called after @ref Simulation::ComputeFlow in order to
  /// determine where the water at each cell is going to be moving.
  ///
  /// @param waterAdder A function taking an x and y coordinate as well as a
  ///                   water value to be added to a cell within the water
  ///                   model.
  template<typename WaterAdder>
  void TransportWater(WaterAdder waterAdder);

  /// Erodes and deposits sediment, and then moves remaining sediment based on
  /// the velocity of the water at each cell.
  ///
  /// @param kC A function taking an x and y coordinate and returning the
  ///           carry capacity constant at that particular location.
  ///
  /// @param kD A function taking an x and y coordinate and returning the
  ///           deposition constant at that particular location.
  ///
  /// @param kE A function taking an x and y coordinate and returning the
  ///           erosion constant at that particular location.
  ///
  /// @param heightAdder A function taking an x and y coordinate, as well as a
  ///                    height delta, and adding the value to the height model.
  ///
  /// @note For simple models, @p kC, @p kE and @p kD can all be single, uniform
  ///       values.
  template<typename CarryCapacity,
           typename Deposition,
           typename Erosion,
           typename HeightAdder>
  void TransportSediment(CarryCapacity kC,
                         Deposition kD,
                         Erosion kE,
                         HeightAdder heightAdder);

  /// Evaporates water in the water model, based on evaporation constants.
  ///
  /// @param waterAdder A function taking an x and y coordinate as well as a
  ///                   water value to be added to a cell within the water
  ///                   model.
  ///
  /// @param kEvap A function taking an x and y coordinate and returning the
  ///              evaporation constant at that particular location. It is
  ///              the responsibility of this function to ensure that the water
  ///              level does not become negative at this step.
  template<typename WaterAdder, typename Evaporation>
  void Evaporate(WaterAdder water, Evaporation kEvap);

  /// Deposits all currently suspended sediment into the terrain.
  template<typename HeightAdder>
  void TerminateRainfall(HeightAdder heightAdder);

  /// Resizes the grid and resets all per-cell buffers.
  void Resize(int w, int h);

  /// Gets the sediment levels at each cell. Useful primarily for debugging.
  auto GetSediment() const noexcept -> const std::vector<float>&
  {
    return mSediment;
  }

  /// Sets the physical width of one cell, in meters.
  void SetMetersPerX(float metersPerX) noexcept
  {
    mPipeLengths[0] = metersPerX;
  }

  /// Sets the physical height of one cell, in meters.
  void SetMetersPerY(float metersPerY) noexcept
  {
    mPipeLengths[1] = metersPerY;
  }

private:
  /// Per-cell 2D water velocity (x, y components).
  using Velocity = std::array<float, 2>;

  /// Per-cell outflow through the four virtual pipes
  /// (order: up, left, right, down — see the delta arrays in the .inl code).
  using Flow = std::array<float, 4>;

  template<typename Height, typename Water>
  void ComputeFlowAndTiltAt(const Height& height,
                            const Water& water,
                            int x,
                            int y);

  template<typename WaterAdder>
  void TransportWaterAt(WaterAdder& water, int x, int y);

  template<typename CarryCapacity,
           typename Deposition,
           typename Erosion,
           typename HeightAdder>
  void ErodeAndDeposit(CarryCapacity& kC,
                       Deposition& kD,
                       Erosion& kE,
                       HeightAdder& heightAdder,
                       int x,
                       int y);

  const Flow& GetFlow(int x, int y) const noexcept
  {
    return mFlow[(y * GetWidth()) + x];
  }

  Flow& GetFlow(int x, int y) noexcept { return mFlow[(y * GetWidth()) + x]; }

  bool InBounds(int x, int y) const noexcept
  {
    return (x >= 0) && (x < GetWidth()) && (y >= 0) && (y < GetHeight());
  }

  Flow GetInflow(int x, int y) const noexcept;

  float GetScalingFactor(const Flow& flow, float waterLevel) noexcept;

  int ToIndex(int x, int y) const noexcept { return (y * GetWidth()) + x; }

private:
  // Seconds advanced per iteration.
  float mTimeStep = 0.0125;
  // Gravitational acceleration, m/s^2.
  float mGravity = 9.8;
  // Physical cell size in meters along x and y.
  std::array<float, 2> mPipeLengths{ 1, 1 };
  // Grid dimensions: { width, height } in cells.
  std::array<int, 2> mSize{ 0, 0 };
  // Per-cell outflow through the four virtual pipes.
  std::vector<Flow> mFlow;
  // Per-cell suspended sediment amount.
  std::vector<float> mSediment;
  // Per-cell water velocity.
  std::vector<Velocity> mVelocity;
  // Per-cell terrain tilt factor (0..1), used for carry capacity.
  std::vector<float> mTilt;
};

// Implementation details beyond this point.
inline Simulation::Simulation(int w, int h)
{
  Resize(w, h);
}

template<typename WaterAdder>
void
Simulation::TransportWater(WaterAdder water)
{
  // Each cell only writes its own velocity entry, so rows are independent
  // and the outer loop can run in parallel.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int y = 0; y < GetHeight(); y++) {
    for (int x = 0; x < GetWidth(); x++) {
      TransportWaterAt(water, x, y);
    }
  }
}

template<typename WaterAdder>
void
Simulation::TransportWaterAt(WaterAdder& water, int x, int y)
{
  auto& flow = GetFlow(x, y);

  auto inflow = GetInflow(x, y);

  // Net volume change: what flows in from the four neighbors minus what
  // flows out through this cell's four pipes, over one time step.
  auto inflowSum = std::accumulate(inflow.begin(), inflow.end(), 0.0f);

  auto outflowSum = std::accumulate(flow.begin(), flow.end(), 0.0f);

  auto volumeDelta = (inflowSum - outflowSum) * mTimeStep;

  // Convert volume to a water-level change via the cell's footprint area.
  auto waterDelta = volumeDelta / (mPipeLengths[0] * mPipeLengths[1]);

  // The adder applies the delta and reports the resulting water level.
  float waterLevel = water(x, y, waterDelta);

  // Compute Water Velocity

  // Average horizontal/vertical throughput across opposing pipe pairs.
  float dx = 0.5f * ((inflow[1] - flow[1]) + (flow[2] - inflow[2]));
  float dy = 0.5f * ((flow[0] - inflow[0]) + (inflow[3] - flow[3]));

  // Mean of the water level before and after this step's delta.
  float avgWaterLevel = waterLevel - (waterDelta * 0.5f);

  Velocity velocity{ { 0, 0 } };

  // Guard the division; a dry cell keeps zero velocity.
  if (avgWaterLevel != 0.0f) {
    velocity[0] = dx / (mPipeLengths[0] * avgWaterLevel);
    velocity[1] = dy / (mPipeLengths[1] * avgWaterLevel);
  }

  mVelocity[ToIndex(x, y)] = velocity;
}

template<typename Height, typename Water>
void
Simulation::ComputeFlowAndTilt(const Height& height, const Water& water)
{
  // Each cell only updates its own flow/tilt entries; rows are independent.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int y = 0; y < GetHeight(); y++) {
    for (int x = 0; x < GetWidth(); x++)
      ComputeFlowAndTiltAt(height, water, x, y);
  }
}

template<typename Height, typename Water>
void
Simulation::ComputeFlowAndTiltAt(const Height& height,
                                 const Water& water,
                                 int x,
                                 int y)
{
  auto& center = GetFlow(x, y);

  // Neighbor order used throughout: up, left, right, down.
  std::array<int, 4> xDeltas{ { 0, -1, 1, 0 } };
  std::array<int, 4> yDeltas{ { -1, 0, 0, 1 } };

  auto centerH = height(x, y);
  auto centerW = water(x, y);

  // Out-of-bounds neighbors default to the center height, which makes the
  // height difference (and therefore the tilt contribution) zero at edges.
  std::array<float, 4> heightNeighbors{ centerH, centerH, centerH, centerH };

  // Which pipe-length axis applies to each neighbor (y for up/down, x for
  // left/right).
  std::array<int, 4> pipeLengthIndices{ { 1, 0, 0, 1 } };

  for (int i = 0; i < 4; i++) {

    auto neighborX = x + xDeltas[i];
    auto neighborY = y + yDeltas[i];

    if (!InBounds(neighborX, neighborY))
      continue;

    heightNeighbors[i] = height(neighborX, neighborY);

    auto neighborH = heightNeighbors[i];
    auto neighborW = water(neighborX, neighborY);

    // Difference of total surface levels (terrain + water) drives the flow.
    auto heightDiff = (centerH + centerW) - (neighborH + neighborW);

    // Cross sectional area of the virtual pipe.
    float area = 1;

    // Length of the virtual pipe.
    float pipeLength = mPipeLengths[pipeLengthIndices[i]];

    auto c = mTimeStep * area * (mGravity * heightDiff) / pipeLength;

    // Outflow accumulates over time but can never be negative.
    center[i] = std::max(0.0f, center[i] + c);
  }

  float totalOutputVolume =
    std::accumulate(center.begin(), center.end(), 0.0f) * mTimeStep;

  // A cell cannot emit more water than it holds; scale the four outflows
  // down uniformly when they would overdrain the cell.
  if (totalOutputVolume > (centerW * mPipeLengths[0] * mPipeLengths[1])) {
    auto k = GetScalingFactor(center, centerW);
    for (auto& n : center)
      n *= k;
  }

  // Compute Tilt

  // Central differences of the terrain height (water excluded).
  float avgDeltaY = 0;
  avgDeltaY += (centerH - heightNeighbors[0]);
  avgDeltaY += (heightNeighbors[3] - centerH);
  avgDeltaY *= 0.5f;

  float avgDeltaX = 0;
  avgDeltaX += (centerH - heightNeighbors[1]);
  avgDeltaX += (heightNeighbors[2] - centerH);
  avgDeltaX *= 0.5f;

  float a = avgDeltaX * avgDeltaX;
  float b = avgDeltaY * avgDeltaY;

  auto abSum = a + b;

  // sin(slope angle) expressed via the gradient magnitude: |g| / sqrt(1+|g|^2).
  mTilt[ToIndex(x, y)] = std::sqrt(abSum) / std::sqrt(1 + abSum);
}

template<typename CarryCapacity,
         typename Deposition,
         typename Erosion,
         typename HeightAdder>
void
Simulation::TransportSediment(CarryCapacity kC,
                              Deposition kD,
                              Erosion kE,
                              HeightAdder heightAdder)
{
  // Pass 1: exchange sediment with the terrain at every cell.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int y = 0; y < GetHeight(); y++) {
    for (int x = 0; x < GetWidth(); x++)
      ErodeAndDeposit(kC, kD, kE, heightAdder, x, y);
  }

  std::vector<float> nextSediment(GetWidth() * GetHeight());

  // Pass 2: semi-Lagrangian advection — trace each cell's velocity backwards
  // and bilinearly sample the sediment field at the source position.
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int y = 0; y < GetHeight(); y++) {

    for (int x = 0; x < GetWidth(); x++) {

      auto index = ToIndex(x, y);

      auto vel = mVelocity[index];

      // NOTE(review): the signs differ per axis (x minus, y plus).  This
      // looks like a backtrace with a flipped y-axis convention; presumably
      // intentional, but confirm against the velocity computation in
      // TransportWaterAt.
      auto xf = x - (vel[0] * mTimeStep);
      auto yf = y + (vel[1] * mTimeStep);

      auto xfi = int(xf);
      auto yfi = int(yf);

      // Fractional position within the source cell, for bilinear weights.
      auto u = xf - xfi;
      auto v = yf - yfi;

      // Samples outside the grid contribute zero sediment.
      std::array<float, 4> s{ { 0, 0, 0, 0 } };

      if (InBounds(xfi + 0, yfi + 0))
        s[0] = mSediment[ToIndex(xfi + 0, yfi + 0)];

      if (InBounds(xfi + 1, yfi + 0))
        s[1] = mSediment[ToIndex(xfi + 1, yfi + 0)];

      if (InBounds(xfi + 0, yfi + 1))
        s[2] = mSediment[ToIndex(xfi + 0, yfi + 1)];

      if (InBounds(xfi + 1, yfi + 1))
        s[3] = mSediment[ToIndex(xfi + 1, yfi + 1)];

      // Bilinear interpolation of the four surrounding samples.
      float sx1 = s[0] + (u * (s[1] - s[0]));
      float sx2 = s[2] + (u * (s[3] - s[2]));

      nextSediment[index] = sx1 + (v * (sx2 - sx1));
    }
  }

  mSediment = std::move(nextSediment);
}

template<typename HeightAdder>
void
Simulation::TerminateRainfall(HeightAdder heightAdder)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int y = 0; y < GetHeight(); y++) {

    for (int x = 0; x < GetWidth(); x++) {

      auto index = ToIndex(x, y);

      // Drop all remaining suspended sediment back onto the terrain,
      // converting amount to height via the cell's footprint area.
      float sediment = mSediment[index];

      heightAdder(x, y, sediment / (mPipeLengths[0] * mPipeLengths[1]));

      // Reset per-cell state so the object could start a fresh event.
      mSediment[index] = 0;

      mVelocity[index][0] = 0;
      mVelocity[index][1] = 0;
    }
  }
}

template<typename CarryCapacity,
         typename Deposition,
         typename Erosion,
         typename HeightAdder>
void
Simulation::ErodeAndDeposit(CarryCapacity& kC,
                            Deposition& kD,
                            Erosion& kE,
                            HeightAdder& heightAdder,
                            int x,
                            int y)
{
  auto vel = mVelocity[ToIndex(x, y)];

  auto velocityMagnitude = std::sqrt((vel[0] * vel[0]) + (vel[1] * vel[1]));

  float tiltAngle = mTilt[ToIndex(x, y)];

  // How much sediment this cell's water can carry; the 0.01 floor keeps
  // flat terrain from zeroing the capacity entirely.
  float capacity = kC(x, y) * std::max(0.01f, tiltAngle) * velocityMagnitude;

  float sediment = mSediment[ToIndex(x, y)];

  // Under capacity -> erode (kE); over capacity -> deposit (kD).  The signed
  // (capacity - sediment) term makes the same two lines handle both cases:
  // terrain loses what the water gains, and vice versa.
  float factor = (capacity > sediment) ? kE(x, y) : kD(x, y);

  heightAdder(x, y, -(factor * (capacity - sediment)));

  mSediment[ToIndex(x, y)] += factor * (capacity - sediment);
}

template<typename WaterAdder, typename Evaporation>
void
Simulation::Evaporate(WaterAdder water, Evaporation kEvap)
{
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int y = 0; y < GetHeight(); y++) {
    for (int x = 0; x < GetWidth(); x++)
      // Negative delta removes water; clamping at zero is the adder's job
      // (see the class documentation for kEvap).
      water(x, y, -mTimeStep * kEvap(x, y));
  }
}

inline auto
Simulation::GetInflow(int centerX, int centerY) const noexcept -> Flow
{
  // Same neighbor order as ComputeFlowAndTiltAt: up, left, right, down.
  std::array<int, 4> xDeltas{ { 0, -1, 1, 0 } };
  std::array<int, 4> yDeltas{ { -1, 0, 0, 1 } };

  std::array<float, 4> inflow{ { 0, 0, 0, 0 } };

  for (int i = 0; i < 4; i++) {

    int x = centerX + xDeltas[i];
    int y = centerY + yDeltas[i];

    // A neighbor's flow toward us is its pipe in the opposite direction,
    // which is index (3 - i) under this ordering.
    if (InBounds(x, y))
      inflow[i] = GetFlow(x, y)[3 - i];
  }

  return inflow;
}

inline float
Simulation::GetScalingFactor(const Flow& flow, float waterLevel) noexcept
{
  auto volume = std::accumulate(flow.begin(), flow.end(), 0.0f) * mTimeStep;

  // No outflow: nothing to scale.
  if (volume == 0.0f)
    return 1.0f;

  // Ratio of available water volume to requested outflow volume, capped at 1
  // so flows are only ever scaled down.
  return std::min(1.0f, (waterLevel * mPipeLengths[0] * mPipeLengths[1]) / volume);
}

inline void
Simulation::Resize(int w, int h)
{
  assert(w >= 0);
  assert(h >= 0);

  // Also clamp in release builds, where the asserts compile out.
  w = std::max(w, 0);
  h = std::max(h, 0);

  mFlow.resize(w * h);

  mSediment.resize(w * h);

  mVelocity.resize(w * h);

  mTilt.resize(w * h);

  mSize[0] = w;
  mSize[1] = h;
}

} // namespace TinyErode

#endif // TINYERODE_H_INCLUDED
dft.c
// Copyright Naoki Shibata 2010 - 2017. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <assert.h> #include <signal.h> #include <setjmp.h> #include "sleef.h" #include "misc.h" #include "common.h" #include "arraymap.h" #include "dftcommon.h" #ifdef _OPENMP #include <omp.h> #endif #if BASETYPEID == 1 typedef double real; typedef Sleef_double2 sc_t; #define BASETYPESTRING "double" #define MAGIC 0x27182818 #define MAGIC2D 0x17320508 #define INIT SleefDFT_double_init1d #define EXECUTE SleefDFT_double_execute #define INIT2D SleefDFT_double_init2d #define CTBL ctbl_double #define REALSUB0 realSub0_double #define REALSUB1 realSub1_double #define GETINT getInt_double #define GETPTR getPtr_double #define DFTF dftf_double #define DFTB dftb_double #define TBUTF tbutf_double #define TBUTB tbutb_double #define BUTF butf_double #define BUTB butb_double #define SINCOSPI Sleef_sincospi_u05 #include "dispatchdp.h" #elif BASETYPEID == 2 typedef float real; typedef Sleef_float2 sc_t; #define BASETYPESTRING "float" #define MAGIC 0x31415926 #define MAGIC2D 0x22360679 #define INIT SleefDFT_float_init1d #define EXECUTE SleefDFT_float_execute #define INIT2D SleefDFT_float_init2d #define CTBL ctbl_float #define REALSUB0 realSub0_float #define REALSUB1 realSub1_float #define GETINT getInt_float #define GETPTR getPtr_float #define DFTF dftf_float #define DFTB dftb_float #define TBUTF tbutf_float #define TBUTB tbutb_float #define BUTF butf_float #define BUTB butb_float #define SINCOSPI Sleef_sincospif_u05 #include "dispatchsp.h" #elif BASETYPEID == 3 typedef long double real; typedef Sleef_longdouble2 sc_t; #define BASETYPESTRING "long double" #define MAGIC 0x14142135 #define MAGIC2D 0x26457513 #define INIT SleefDFT_longdouble_init1d #define EXECUTE SleefDFT_longdouble_execute #define 
INIT2D SleefDFT_longdouble_init2d #define CTBL ctbl_longdouble #define REALSUB0 realSub0_longdouble #define REALSUB1 realSub1_longdouble #define GETINT getInt_longdouble #define GETPTR getPtr_longdouble #define DFTF dftf_longdouble #define DFTB dftb_longdouble #define TBUTF tbutf_longdouble #define TBUTB tbutb_longdouble #define BUTF butf_longdouble #define BUTB butb_longdouble #define SINCOSPI Sleef_sincospil_u05 #include "dispatchld.h" #elif BASETYPEID == 4 typedef Sleef_quad real; typedef Sleef_quad2 sc_t; #define BASETYPESTRING "Sleef_quad" #define MAGIC 0x33166247 #define MAGIC2D 0x36055512 #define INIT SleefDFT_quad_init1d #define EXECUTE SleefDFT_quad_execute #define INIT2D SleefDFT_quad_init2d #define CTBL ctbl_Sleef_quad #define REALSUB0 realSub0_Sleef_quad #define REALSUB1 realSub1_Sleef_quad #define GETINT getInt_Sleef_quad #define GETPTR getPtr_Sleef_quad #define DFTF dftf_Sleef_quad #define DFTB dftb_Sleef_quad #define TBUTF tbutf_Sleef_quad #define TBUTB tbutb_Sleef_quad #define BUTF butf_Sleef_quad #define BUTB butb_Sleef_quad #define SINCOSPI Sleef_sincospiq_u05 #include "dispatchqp.h" #else #error No BASETYPEID specified #endif #define IMPORT_IS_EXPORT #include "sleefdft.h" // #if BASETYPEID == 4 real CTBL[] = { 0.7071067811865475243818940365159164684883Q, -0.7071067811865475243818940365159164684883Q, 0.9238795325112867561014214079495587839119Q, -0.382683432365089771723257530688933059082Q, 0.382683432365089771723257530688933059082Q, -0.9238795325112867561014214079495587839119Q, #if MAXBUTWIDTH >= 5 0.9807852804032304491190993878113602022495Q, -0.1950903220161282678433729148581576851029Q, 0.5555702330196022247573058028269343822103Q, -0.8314696123025452370808655033762590846891Q, 0.8314696123025452370808655033762590846891Q, -0.5555702330196022247573058028269343822103Q, 0.1950903220161282678433729148581576851029Q, -0.9807852804032304491190993878113602022495Q, #endif #if MAXBUTWIDTH >= 6 0.9951847266721968862310254699821143731242Q, 
-0.09801714032956060199569840382660679267701Q, 0.6343932841636454982026105398063009488396Q, -0.7730104533627369607965383602188325085081Q, 0.881921264348355029715105513066220055407Q, -0.4713967368259976485449225247492677226546Q, 0.2902846772544623676448431737195932100803Q, -0.9569403357322088649310892760624369657307Q, 0.9569403357322088649310892760624369657307Q, -0.2902846772544623676448431737195932100803Q, 0.4713967368259976485449225247492677226546Q, -0.881921264348355029715105513066220055407Q, 0.7730104533627369607965383602188325085081Q, -0.6343932841636454982026105398063009488396Q, 0.09801714032956060199569840382660679267701Q, -0.9951847266721968862310254699821143731242Q, #endif #if MAXBUTWIDTH >= 7 0.9987954562051723927007702841240899260811Q, -0.04906767432741801425355085940205324135377Q, 0.6715589548470184006194634573905233310143Q, -0.7409511253549590911932944126139233276263Q, 0.9039892931234433315823215138173907234886Q, -0.427555093430282094315230886905077056781Q, 0.336889853392220050702686798271834334173Q, -0.9415440651830207783906830087961026265475Q, 0.9700312531945439926159106824865574481009Q, -0.2429801799032638899447731489766866275204Q, 0.5141027441932217266072797923204262815489Q, -0.8577286100002720698929313536407192941624Q, 0.8032075314806449097991200569701675249235Q, -0.5956993044924333434615715265891822127742Q, 0.1467304744553617516588479505190711904561Q, -0.9891765099647809734561415551112872890371Q, 0.9891765099647809734561415551112872890371Q, -0.1467304744553617516588479505190711904561Q, 0.5956993044924333434615715265891822127742Q, -0.8032075314806449097991200569701675249235Q, 0.8577286100002720698929313536407192941624Q, -0.5141027441932217266072797923204262815489Q, 0.2429801799032638899447731489766866275204Q, -0.9700312531945439926159106824865574481009Q, 0.9415440651830207783906830087961026265475Q, -0.336889853392220050702686798271834334173Q, 0.427555093430282094315230886905077056781Q, -0.9039892931234433315823215138173907234886Q, 
0.7409511253549590911932944126139233276263Q, -0.6715589548470184006194634573905233310143Q, 0.04906767432741801425355085940205324135377Q, -0.9987954562051723927007702841240899260811Q, #endif }; #else real CTBL[] = { 0.7071067811865475243818940365159164684883L, -0.7071067811865475243818940365159164684883L, 0.9238795325112867561014214079495587839119L, -0.382683432365089771723257530688933059082L, 0.382683432365089771723257530688933059082L, -0.9238795325112867561014214079495587839119L, #if MAXBUTWIDTH >= 5 0.9807852804032304491190993878113602022495L, -0.1950903220161282678433729148581576851029L, 0.5555702330196022247573058028269343822103L, -0.8314696123025452370808655033762590846891L, 0.8314696123025452370808655033762590846891L, -0.5555702330196022247573058028269343822103L, 0.1950903220161282678433729148581576851029L, -0.9807852804032304491190993878113602022495L, #endif #if MAXBUTWIDTH >= 6 0.9951847266721968862310254699821143731242L, -0.09801714032956060199569840382660679267701L, 0.6343932841636454982026105398063009488396L, -0.7730104533627369607965383602188325085081L, 0.881921264348355029715105513066220055407L, -0.4713967368259976485449225247492677226546L, 0.2902846772544623676448431737195932100803L, -0.9569403357322088649310892760624369657307L, 0.9569403357322088649310892760624369657307L, -0.2902846772544623676448431737195932100803L, 0.4713967368259976485449225247492677226546L, -0.881921264348355029715105513066220055407L, 0.7730104533627369607965383602188325085081L, -0.6343932841636454982026105398063009488396L, 0.09801714032956060199569840382660679267701L, -0.9951847266721968862310254699821143731242L, #endif #if MAXBUTWIDTH >= 7 0.9987954562051723927007702841240899260811L, -0.04906767432741801425355085940205324135377L, 0.6715589548470184006194634573905233310143L, -0.7409511253549590911932944126139233276263L, 0.9039892931234433315823215138173907234886L, -0.427555093430282094315230886905077056781L, 0.336889853392220050702686798271834334173L, 
-0.9415440651830207783906830087961026265475L, 0.9700312531945439926159106824865574481009L, -0.2429801799032638899447731489766866275204L, 0.5141027441932217266072797923204262815489L, -0.8577286100002720698929313536407192941624L, 0.8032075314806449097991200569701675249235L, -0.5956993044924333434615715265891822127742L, 0.1467304744553617516588479505190711904561L, -0.9891765099647809734561415551112872890371L, 0.9891765099647809734561415551112872890371L, -0.1467304744553617516588479505190711904561L, 0.5956993044924333434615715265891822127742L, -0.8032075314806449097991200569701675249235L, 0.8577286100002720698929313536407192941624L, -0.5141027441932217266072797923204262815489L, 0.2429801799032638899447731489766866275204L, -0.9700312531945439926159106824865574481009L, 0.9415440651830207783906830087961026265475L, -0.336889853392220050702686798271834334173L, 0.427555093430282094315230886905077056781L, -0.9039892931234433315823215138173907234886L, 0.7409511253549590911932944126139233276263L, -0.6715589548470184006194634573905233310143L, 0.04906767432741801425355085940205324135377L, -0.9987954562051723927007702841240899260811L, #endif }; #endif #ifndef ENABLE_STREAM #error ENABLE_STREAM not defined #endif static const int constK[] = { 0, 2, 6, 14, 38, 94, 230, 542, 1254 }; extern const char *configStr[]; extern int planFilePathSet; // Utility functions static jmp_buf sigjmp; static void sighandler(int signum) { longjmp(sigjmp, 1); } static int checkISAAvailability(int isa) { signal(SIGILL, sighandler); if (setjmp(sigjmp) == 0) { int ret = GETINT[isa] != NULL && (*GETINT[isa])(BASETYPEID); signal(SIGILL, SIG_DFL); return ret; } signal(SIGILL, SIG_DFL); return 0; } #ifdef _OPENMP static int omp_thread_count() { int n = 0; #pragma omp parallel reduction(+:n) n += 1; return n; } #endif static void startAllThreads(const int nth) { #ifdef _OPENMP volatile int8_t *state = calloc(nth, 1); int th; #pragma omp parallel for for(th=0;th<nth;th++) { state[th] = 1; for(;;) { int i; 
for(i=0;i<nth;i++) if (state[i] == 0) break; if (i == nth) break; } } free((void *)state); #endif } // Dispatcher static void dispatch(SleefDFT *p, const int N, real *d, const real *s, const int level, const int config) { const int K = constK[N], log2len = p->log2len; if (level == N) { if ((p->mode & SLEEF_MODE_BACKWARD) == 0) { void (*func)(real *, const real *, const int) = DFTF[config][p->isa][N]; (*func)(d, s, log2len-N); } else { void (*func)(real *, const real *, const int) = DFTB[config][p->isa][N]; (*func)(d, s, log2len-N); } } else if (level == log2len) { assert(p->vecwidth <= (1 << N)); if ((p->mode & SLEEF_MODE_BACKWARD) == 0) { void (*func)(real *, uint32_t *, const real *, const int, const real *, const int) = TBUTF[config][p->isa][N]; (*func)(d, p->perm[level], s, log2len-N, p->tbl[N][level], K); } else { void (*func)(real *, uint32_t *, const real *, const int, const real *, const int) = TBUTB[config][p->isa][N]; (*func)(d, p->perm[level], s, log2len-N, p->tbl[N][level], K); } } else { if ((p->mode & SLEEF_MODE_BACKWARD) == 0) { void (*func)(real *, uint32_t *, const int, const real *, const int, const real *, const int) = BUTF[config][p->isa][N]; (*func)(d, p->perm[level], log2len-level, s, log2len-N, p->tbl[N][level], K); } else { void (*func)(real *, uint32_t *, const int, const real *, const int, const real *, const int) = BUTB[config][p->isa][N]; (*func)(d, p->perm[level], log2len-level, s, log2len-N, p->tbl[N][level], K); } } } // Transposer #define LOG2BS 4 #define BS (1 << LOG2BS) #define TRANSPOSE_BLOCK(y2) do { \ for(int x2=y2+1;x2<BS;x2++) { \ element_t r = *(element_t *)&row[y2].r[x2*2+0]; \ *(element_t *)&row[y2].r[x2*2+0] = *(element_t *)&row[x2].r[y2*2+0]; \ *(element_t *)&row[x2].r[y2*2+0] = r; \ }} while(0) static void transpose(real *RESTRICT ALIGNED(256) d, real *RESTRICT ALIGNED(256) s, const int log2n, const int log2m) { if (log2n < LOG2BS || log2m < LOG2BS) { for(int y=0;y<(1 << log2n);y++) { for(int x=0;x<(1 << log2m);x++) { 
real r0 = s[((y << log2m)+x)*2+0]; real r1 = s[((y << log2m)+x)*2+1]; d[((x << log2n)+y)*2+0] = r0; d[((x << log2n)+y)*2+1] = r1; } } } else { #if defined(__GNUC__) && !defined(__clang__) typedef struct { real __attribute__((vector_size(sizeof(real)*BS*2))) r; } row_t; typedef struct { real __attribute__((vector_size(sizeof(real)*2))) r; } element_t; #else typedef struct { real r[BS*2]; } row_t; typedef struct { real r0, r1; } element_t; #endif for(int y=0;y<(1 << log2n);y+=BS) { for(int x=0;x<(1 << log2m);x+=BS) { row_t row[BS]; for(int y2=0;y2<BS;y2++) { row[y2] = *(row_t *)&s[(((y+y2) << log2m)+x)*2]; } #if LOG2BS == 4 TRANSPOSE_BLOCK( 0); TRANSPOSE_BLOCK( 1); TRANSPOSE_BLOCK( 2); TRANSPOSE_BLOCK( 3); TRANSPOSE_BLOCK( 4); TRANSPOSE_BLOCK( 5); TRANSPOSE_BLOCK( 6); TRANSPOSE_BLOCK( 7); TRANSPOSE_BLOCK( 8); TRANSPOSE_BLOCK( 9); TRANSPOSE_BLOCK(10); TRANSPOSE_BLOCK(11); TRANSPOSE_BLOCK(12); TRANSPOSE_BLOCK(13); TRANSPOSE_BLOCK(14); TRANSPOSE_BLOCK(15); #else for(int y2=0;y2<BS;y2++) { for(int x2=y2+1;x2<BS;x2++) { element_t r = *(element_t *)&row[y2].r[x2*2+0]; *(element_t *)&row[y2].r[x2*2+0] = *(element_t *)&row[x2].r[y2*2+0]; *(element_t *)&row[x2].r[y2*2+0] = r; } } #endif for(int y2=0;y2<BS;y2++) { *(row_t *)&d[(((x+y2) << log2n)+y)*2] = row[y2]; } } } } } #ifdef _OPENMP static void transposeMT(real *RESTRICT ALIGNED(256) d, real *RESTRICT ALIGNED(256) s, int log2n, int log2m) { if (log2n < LOG2BS || log2m < LOG2BS) { for(int y=0;y<(1 << log2n);y++) { for(int x=0;x<(1 << log2m);x++) { real r0 = s[((y << log2m)+x)*2+0]; real r1 = s[((y << log2m)+x)*2+1]; d[((x << log2n)+y)*2+0] = r0; d[((x << log2n)+y)*2+1] = r1; } } } else { #if defined(__GNUC__) && !defined(__clang__) typedef struct { real __attribute__((vector_size(sizeof(real)*BS*2))) r; } row_t; typedef struct { real __attribute__((vector_size(sizeof(real)*2))) r; } element_t; #else typedef struct { real r[BS*2]; } row_t; typedef struct { real r0, r1; } element_t; #endif int y; #pragma omp parallel for 
    // Parallelized tile loop (continued): identical body to transpose().
    for(y=0;y<(1 << log2n);y+=BS) {
      for(int x=0;x<(1 << log2m);x+=BS) {
        row_t row[BS];
        for(int y2=0;y2<BS;y2++) { row[y2] = *(row_t *)&s[(((y+y2) << log2m)+x)*2]; }
#if LOG2BS == 4
        TRANSPOSE_BLOCK( 0); TRANSPOSE_BLOCK( 1); TRANSPOSE_BLOCK( 2); TRANSPOSE_BLOCK( 3);
        TRANSPOSE_BLOCK( 4); TRANSPOSE_BLOCK( 5); TRANSPOSE_BLOCK( 6); TRANSPOSE_BLOCK( 7);
        TRANSPOSE_BLOCK( 8); TRANSPOSE_BLOCK( 9); TRANSPOSE_BLOCK(10); TRANSPOSE_BLOCK(11);
        TRANSPOSE_BLOCK(12); TRANSPOSE_BLOCK(13); TRANSPOSE_BLOCK(14); TRANSPOSE_BLOCK(15);
#else
        for(int y2=0;y2<BS;y2++) {
          for(int x2=y2+1;x2<BS;x2++) {
            element_t r = *(element_t *)&row[y2].r[x2*2+0];
            *(element_t *)&row[y2].r[x2*2+0] = *(element_t *)&row[x2].r[y2*2+0];
            *(element_t *)&row[x2].r[y2*2+0] = r;
          }
        }
#endif
        for(int y2=0;y2<BS;y2++) { *(row_t *)&d[(((x+y2) << log2n)+y)*2] = row[y2]; }
      }
    }
  }
}
#endif // #ifdef _OPENMP

// Table generator

// sin/cos pair for index i masked to the bits relevant at `level`,
// scaled by 1/(1 << (log2len-1)).  Used to fill the coefficient tables.
static sc_t r2coefsc(int i, int log2len, int level) {
  return SINCOSPI((i & ((-1 << (log2len - level)) & ~(-1 << log2len))) * ((real)1.0/(1 << (log2len-1))));
}

// Same as r2coefsc but for 3*i (the masked index tripled, wrapped by the
// ~(-1 << log2len) mask).
static sc_t srcoefsc(int i, int log2len, int level) {
  return SINCOSPI(((3*(i & (-1 << (log2len - level)))) & ~(-1 << log2len)) * ((real)1.0/(1 << (log2len-1))));
}

// Recursively appends sin/cos coefficients for the index range [top, bot) of
// the permutation scratch table p into x, starting at offset cnt.  Returns
// the new offset.  Recursion splits the range in half, advancing levelinc by
// 1 for the lower half and 2 for the upper half.
static int makeTableRecurse(real *x, int *p, const int log2len, const int levelorg, const int levelinc, const int sign, const int top, const int bot, const int N, int cnt) {
  if (levelinc >= N-1) return cnt;
  const int level = levelorg - levelinc;
  if (bot - top > 4) {
    const int bl = 1 << (N - levelinc);
    const int w = bl/4;
    for(int j=0;j<(bot-top)/bl;j++) {
      for(int i=0;i<w;i++) {
        int a = sign*(p[(levelinc << N) + top+bl*j+i] & (-1 << (log2len - level)));
        sc_t sc;
        sc = r2coefsc(a, log2len, level);
        x[cnt++] = -sc.x; x[cnt++] = -sc.y;
        sc = srcoefsc(a, log2len, level);
        x[cnt++] = -sc.x; x[cnt++] = -sc.y;
      }
      cnt = makeTableRecurse(x, p, log2len, levelorg, levelinc+1, sign, top+bl*j , top+bl*j + bl/2, N, cnt);
      cnt = makeTableRecurse(x, p, log2len, levelorg, levelinc+2, sign, top+bl*j + bl/2, top+bl*j +
        bl , N, cnt); // (continued) upper-half recursion, levelinc advanced by 2
    }
  } else if (bot - top == 4) {
    // Smallest range: emit one coefficient quadruple directly.
    int a = sign*(p[(levelinc << N) + top] & (-1 << (log2len - level)));
    sc_t sc;
    sc = r2coefsc(a, log2len, level);
    x[cnt++] = -sc.x; x[cnt++] = -sc.y;
    sc = srcoefsc(a, log2len, level);
    x[cnt++] = -sc.x; x[cnt++] = -sc.y;
  }
  return cnt;
}

// Bit-reverses the low `nbits` of k (classic shift/mask cascade building r),
// then splices the reversed value and the original k together using split
// points s and d (both clamped to [0, nbits]).
static uint32_t perm(int nbits, uint32_t k, int s, int d) {
  s = MIN(MAX(s, 0), nbits);
  d = MIN(MAX(d, 0), nbits);
  uint32_t r;
  r = (((k & 0xaaaaaaaa) >> 1) | ((k & 0x55555555) << 1));
  r = (((r & 0xcccccccc) >> 2) | ((r & 0x33333333) << 2));
  r = (((r & 0xf0f0f0f0) >> 4) | ((r & 0x0f0f0f0f) << 4));
  r = (((r & 0xff00ff00) >> 8) | ((r & 0x00ff00ff) << 8));
  r = ((r >> 16) | (r << 16)) >> (32-nbits);
  return (((r << s) | (k & ~(-1 << s))) & ~(-1 << d)) | ((((k >> s) | (r & (-1 << (nbits-s)))) << d) & ~(-1 << nbits));
}

// Builds the per-level coefficient tables for butterfly width N.
// Returns an array indexed by level (N..log2len); entries may be NULL where
// the level is unusable with the given vector width.  The top level gets an
// extra pass (on the next chunk line) that interleaves the table for
// vectorized access.
static real **makeTable(int sign, int vecwidth, int log2len, const int N, const int K) {
  if (log2len < N) return NULL;
  int *p = (int *)malloc(sizeof(int)*((N+1)<<N));
  real **tbl = (real **)calloc(sizeof(real *), (log2len+1));
  for(int level=N;level<=log2len;level++) {
    if (level == log2len && (1 << (log2len-N)) < vecwidth) { tbl[level] = NULL; continue; }
    int tblOffset = 0;
    tbl[level] = (real *)Sleef_malloc(sizeof(real) * (K << (level-N)));
    for(int i0=0;i0 < (1 << (log2len-N));i0+=(1 << (log2len - level))) {
      // Fill the permutation scratch table for this index block.
      for(int j=0;j<N+1;j++) {
        for(int i=0;i<(1 << N);i++) {
          p[(j << N) + i] = perm(log2len, i0 + (i << (log2len-N)), log2len-level, log2len-(level-j));
        }
      }
      int a = -sign*(p[((N-1) << N) + 0] & (-1 << (log2len - level)));
      sc_t sc = r2coefsc(a, log2len, level-N+1);
      tbl[level][tblOffset++] = sc.y; tbl[level][tblOffset++] = sc.x;
      tblOffset = makeTableRecurse(tbl[level], p, log2len, level, 0, sign, 0, 1 << N, N, tblOffset);
    }
    if (level == log2len) {
      // Re-layout the top-level table: duplicate each coefficient into an
      // interleaved pair, transposing the (k, v) order for vector loads.
      real *atbl = (real *)Sleef_malloc(sizeof(real)*(K << (log2len-N))*2);
      tblOffset = 0;
      while(tblOffset < (K << (log2len-N))) {
        for(int k=0;k < K;k++) {
          for(int v = 0;v < vecwidth;v++) {
            assert((tblOffset + k * vecwidth + v)*2 + 1 < (K <<
              (log2len-N))*2); // (continued) bounds check for the interleaved table
            atbl[(tblOffset + k * vecwidth + v)*2 + 0] = tbl[log2len][tblOffset + v * K + k];
            atbl[(tblOffset + k * vecwidth + v)*2 + 1] = tbl[log2len][tblOffset + v * K + k];
          }
        }
        tblOffset += K * vecwidth;
      }
      Sleef_free(tbl[log2len]);
      tbl[log2len] = atbl;
    }
  }
  free(p);
  return tbl;
}

// Random planner (for debugging)
//
// Randomly assembles a decomposition path (butterfly width N and config per
// level) and records it in p->bestPath / p->bestPathConfig once a complete
// path to level 0 is found.  nTrial limits the amount of random probing;
// the return value is the remaining trial budget.
static int searchForRandomPathRecurse(SleefDFT *p, int level, int *path, int *pathConfig, uint64_t tm, int nTrial) {
  if (level == 0) {
    // Reached the bottom: commit this path as the current best.
    p->bestTime = tm;
    for(uint32_t j = 0;j < p->log2len+1;j++) {
      p->bestPathConfig[j] = pathConfig[j];
      p->bestPath[j] = path[j];
    }
    return nTrial;
  }
  if (level < 1) return nTrial-1;
  for(int i=0;i<10;i++) {
    int N;
    // Pick a random usable butterfly width (its timing slot must be valid).
    do {
      N = 1 + rand() % MAXBUTWIDTH;
    } while(p->tm[0][level*(MAXBUTWIDTH+1)+N] >= 1ULL << 60);
    if (p->vecwidth > (1 << N) || N == p->log2len) continue;
    path[level] = N;
    for(;;) {
      pathConfig[level] = rand() % CONFIGMAX;
#if ENABLE_STREAM == 0
      pathConfig[level] &= ~1;
#endif
      // Reject MT configs when 1-D multithreading is disabled.
      if ((p->mode2 & SLEEF_MODE2_MT1D) == 0 && (pathConfig[level] & CONFIG_MT) != 0) continue;
      break;
    }
    for(int j = level-1;j >= 0;j--) path[j] = 0;
    nTrial = searchForRandomPathRecurse(p, level - N, path, pathConfig, 0, nTrial);
    if (nTrial <= 0) break;
    if (p->bestTime < 1ULL << 60) break;
  }
  return nTrial - 1;
}

// Planner
#define NSHORTESTPATHS 15
#define MAXPATHLEN (MAXLOG2LEN+1)
#define POSMAX (CONFIGMAX * MAXLOG2LEN * (MAXBUTWIDTH+1))

// Graph-node encoding: a (config, level, N) triple packed into one int,
// with -1 reserved for the virtual start node.
static int cln2pos(int config, int level, int N) { return (config * MAXLOG2LEN + level) * MAXBUTWIDTH + N; }
static int pos2config(int pos) { return pos == -1 ? -1 : ((pos - 1) / (MAXBUTWIDTH * MAXLOG2LEN)); }
static int pos2level(int pos) { return pos == -1 ? -1 : (((pos - 1) / MAXBUTWIDTH) % MAXLOG2LEN); }
static int pos2N(int pos) { return pos == -1 ?
  -1 : ((pos - 1) % MAXBUTWIDTH + 1); } // (continued) decode N from a packed pos

// Working state for the k-shortest-paths planner: the best paths found so
// far, a visit counter per node, and a flat-array "heap" of candidate paths.
typedef struct {
  SleefDFT *p;
  int countu[POSMAX];                    // how many accepted paths ended at each node
  int path[NSHORTESTPATHS][MAXPATHLEN];  // accepted best paths
  int pathLen[NSHORTESTPATHS];
  uint64_t cost[NSHORTESTPATHS];
  int nPaths;
  int *heap;                             // candidate paths, MAXPATHLEN ints each
  int *heapLen;
  uint64_t *heapCost;
  int heapSize, nPathsInHeap;
} ks_t;

// Allocates the planner state with an initial candidate capacity of 10.
static ks_t *ksInit(SleefDFT *p) {
  ks_t *q = calloc(1, sizeof(ks_t));
  q->p = p;
  q->heapSize = 10;
  q->heap = calloc(q->heapSize, sizeof(int)*MAXPATHLEN);
  q->heapCost = calloc(q->heapSize, sizeof(uint64_t));
  q->heapLen = calloc(q->heapSize, sizeof(int));
  return q;
}

static void ksDispose(ks_t *q) {
  free(q->heapCost);
  free(q->heapLen);
  free(q->heap);
  free(q);
}

// returns the number of paths in the heap
static int ksSize(ks_t *q) { return q->nPathsInHeap; }

// adds a path to the heap
static void ksAddPath(ks_t *q, int *path, int pathLen, uint64_t cost) {
  assert(pathLen <= MAXPATHLEN);
  if (q->nPathsInHeap == q->heapSize) {
    // Grow all three parallel arrays together.
    q->heapSize *= 2;
    q->heap = realloc(q->heap, q->heapSize * sizeof(int)*MAXPATHLEN);
    q->heapCost = realloc(q->heapCost, q->heapSize * sizeof(uint64_t));
    q->heapLen = realloc(q->heapLen, q->heapSize * sizeof(int));
  }
  for(int i=0;i<pathLen;i++) q->heap[q->nPathsInHeap * MAXPATHLEN + i] = path[i];
  q->heapLen[q->nPathsInHeap] = pathLen;
  q->heapCost[q->nPathsInHeap] = cost;
  q->nPathsInHeap++;
}

// returns the cost of n-th paths in the heap
static uint64_t ksCost(ks_t *q, int n) {
  assert(0 <= n && n < q->nPathsInHeap);
  return q->heapCost[n];
}

// copies the n-th paths in the heap to path, returns its length
static int ksGetPath(ks_t *q, int *path, int n) {
  assert(0 <= n && n < q->nPathsInHeap);
  int len = q->heapLen[n];
  for(int i=0;i<len;i++) path[i] = q->heap[n * MAXPATHLEN + i];
  return len;
}

// removes the n-th paths in the heap
static void ksRemove(ks_t *q, int n) {
  assert(0 <= n && n < q->nPathsInHeap);
  // Shift every later entry down by one slot.
  for(int i=n;i<q->nPathsInHeap-1;i++) {
    int len = q->heapLen[i+1];
    assert(len < MAXPATHLEN);
    for(int j=0;j<len;j++) q->heap[i * MAXPATHLEN + j] = q->heap[(i+1) * MAXPATHLEN + j];
    q->heapLen[i] = q->heapLen[i+1];
    q->heapCost[i] = q->heapCost[i+1];
  }
  q->nPathsInHeap--;
}

// returns the countu value at pos
static int ksCountu(ks_t *q, int pos) {
  assert(0 <= pos && pos < POSMAX);
  return q->countu[pos];
}

// set the countu value at pos to n
static void ksSetCountu(ks_t *q, int pos, int n) {
  assert(0 <= pos && pos < POSMAX);
  q->countu[pos] = n;
}

// adds a path as one of the best k paths, returns the number best paths
static int ksAddBestPath(ks_t *q, int *path, int pathLen, uint64_t cost) {
  assert(pathLen <= MAXPATHLEN);
  assert(q->nPaths < NSHORTESTPATHS);
  for(int i=0;i<pathLen;i++) q->path[q->nPaths][i] = path[i];
  q->pathLen[q->nPaths] = pathLen;
  q->cost[q->nPaths] = cost;
  q->nPaths++;
  return q->nPaths;
}

// returns if pos is a destination
static int ksIsDest(ks_t *q, int pos) { return pos2level(pos) == 0; }

// returns n-th adjacent nodes at pos.
// The graph: from the virtual start (-1) the n-th edge picks a butterfly
// width and MT/non-MT config for the top level; from any other node the next
// level is the current level minus its N, and the config's MT bit is kept.
// Returns -1 when there is no n-th edge.
static int ksAdjacent(ks_t *q, int pos, int n) {
  if (pos != -1 && pos2level(pos) == 0) return -1;
  int NMAX = MIN(MIN(q->p->log2len, MAXBUTWIDTH+1), q->p->log2len - q->p->log2vecwidth + 1);
  if (pos == -1) {
    // Edges out of the start node alternate MT off/on (n & 1).
    int N = n / 2 + MAX(q->p->log2vecwidth, 1);
    if (N >= NMAX) return -1;
    return cln2pos((n & 1) * CONFIG_MT, q->p->log2len, N);
  }
  int config = (pos2config(pos) & CONFIG_MT);
  int N = n + 1;
  int level = pos2level(pos) - pos2N(pos);
  if (level < 0 || N >= NMAX) return -1;
  // When the remaining level reaches 0, the only edge is to the terminal node.
  if (level == 0) return n == 0 ?
    cln2pos(0, 0, 0) : -1; // (continued) terminal node, or no edge
  return cln2pos(config, level, N);
}

// Cost of the n-th edge out of pos: the smaller of the two timing-table
// entries (stream bit off/on) for the destination node.  0 if no such edge.
static uint64_t ksAdjacentCost(ks_t *q, int pos, int n) {
  int nxpos = ksAdjacent(q, pos, n);
  if (nxpos == -1) return 0;
  int config = pos2config(nxpos), level = pos2level(nxpos), N = pos2N(nxpos);
  uint64_t ret0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N];
  uint64_t ret1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N];
  return MIN(ret0, ret1);
}

// Finds the NSHORTESTPATHS cheapest decomposition paths by the measured/
// estimated stage costs, then (in measure mode) re-times the candidates for
// real and stores the winner in p->bestPath / p->bestPathConfig.
static void searchForBestPath(SleefDFT *p) {
  ks_t *q = ksInit(p);
  // Seed the candidate heap with every edge out of the virtual start node.
  for(int i=0;;i++) {
    int v = ksAdjacent(q, -1, i);
    if (v == -1) break;
    uint64_t c = ksAdjacentCost(q, -1, i);
    int path[1] = { v };
    ksAddPath(q, path, 1, c);
  }
  while(ksSize(q) != 0) {
    // Pop the cheapest candidate path (linear scan of the heap).
    uint64_t bestCost = 1ULL << 60;
    int bestPathNum = -1;
    for(int i=0;i<ksSize(q);i++) {
      if (ksCost(q, i) < bestCost) {
        bestCost = ksCost(q, i);
        bestPathNum = i;
      }
    }
    if (bestPathNum == -1) break;
    int path[MAXPATHLEN];
    int pathLen = ksGetPath(q, path, bestPathNum);
    uint64_t cost = ksCost(q, bestPathNum);
    ksRemove(q, bestPathNum);
    int lastPos = path[pathLen-1];
    // Per-node visit cap, as in Yen/Eppstein-style k-shortest-path search.
    if (ksCountu(q, lastPos) >= NSHORTESTPATHS) continue;
    ksSetCountu(q, lastPos, ksCountu(q, lastPos)+1);
    if (ksIsDest(q, lastPos)) {
      // Complete path: record it; stop once we have enough.
      if (ksAddBestPath(q, path, pathLen, cost) >= NSHORTESTPATHS) break;
      continue;
    }
    // Expand: push every extension of this path.
    for(int i=0;;i++) {
      int v = ksAdjacent(q, lastPos, i);
      if (v == -1) break;
      assert(0 <= pos2N(v) && pos2N(v) <= q->p->log2len);
      uint64_t c = ksAdjacentCost(q, lastPos, i);
      path[pathLen] = v;
      ksAddPath(q, path, pathLen+1, cost + c);
    }
  }
  for(int j = p->log2len;j >= 0;j--) p->bestPath[j] = 0;
  if (((p->mode & SLEEF_MODE_MEASURE) != 0 || (planFilePathSet && (p->mode & SLEEF_MODE_MEASUREBITS) == 0))) {
    // Measurement mode: actually execute each candidate and time it.
    uint64_t besttm = 1ULL << 62;
    int bestPath = -1;
    const int niter = 1 + 5000000 / ((1 << p->log2len) + 1);
    real *s2 = NULL, *d2 = NULL;
    // Use caller buffers when present; otherwise zeroed scratch buffers.
    const real *s = p->in == NULL ? (s2 = (real *)memset(Sleef_malloc((2 << p->log2len) * sizeof(real)), 0, sizeof(real) * (2 << p->log2len))) : p->in;
    real *d = p->out == NULL ?
      (d2 = (real *)memset(Sleef_malloc((2 << p->log2len) * sizeof(real)), 0, sizeof(real) * (2 << p->log2len))) : p->out; // (continued) scratch output buffer
#ifdef _OPENMP
    const int tn = omp_get_thread_num();
#else
    const int tn = 0;
#endif
    // Ping-pong buffers: t[0]/t[1] scratch, t[2] the destination.
    real *t[] = { p->x1[tn], p->x0[tn], d };
    // Time non-MT candidates first (mt == 0), then MT candidates.
    for(int mt=0;mt<2;mt++) {
      for(int i=q->nPaths-1;i>=0;i--) {
        if (((pos2config(q->path[i][0]) & CONFIG_MT) != 0) != mt) continue;
        if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
          // Print the candidate path, picking the faster stream variant per stage.
          for(int j=0;j<q->pathLen[i];j++) {
            int N = pos2N(q->path[i][j]);
            int level = pos2level(q->path[i][j]);
            int config = pos2config(q->path[i][j]) & ~1;
            uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N];
            uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N];
            config = t0 < t1 ? config : (config | 1);
            if (N != 0) printf("%d(%s) ", N, configStr[config]);
          }
        }
        if (mt) startAllThreads(p->nThread);
        // First timed run of the full transform, niter times.
        uint64_t tm0 = Sleef_currentTimeMicros();
        for(int k=0;k<niter;k++) {
          int nb = 0;
          const real *lb = s;
          if ((p->pathLen & 1) == 1) nb = -1;
          for(int level = p->log2len, j=0;level >= 1;j++) {
            assert(pos2level(q->path[i][j]) == level);
            int N = pos2N(q->path[i][j]);
            int config = pos2config(q->path[i][j]) & ~1;
            uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N];
            uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N];
            config = t0 < t1 ? config : (config | 1);
            dispatch(p, N, t[nb+1], lb, level, config);
            level -= N;
            lb = t[nb+1];
            nb = (nb + 1) & 1;
          }
        }
        uint64_t tm1 = Sleef_currentTimeMicros();
        // Second identical timed run, to reduce one-off timing noise.
        for(int k=0;k<niter;k++) {
          int nb = 0;
          const real *lb = s;
          if ((p->pathLen & 1) == 1) nb = -1;
          for(int level = p->log2len, j=0;level >= 1;j++) {
            assert(pos2level(q->path[i][j]) == level);
            int N = pos2N(q->path[i][j]);
            int config = pos2config(q->path[i][j]) & ~1;
            uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N];
            uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N];
            config = t0 < t1 ?
              config : (config | 1); // (continued) pick the faster stream variant
            dispatch(p, N, t[nb+1], lb, level, config);
            level -= N;
            lb = t[nb+1];
            nb = (nb + 1) & 1;
          }
        }
        uint64_t tm2 = Sleef_currentTimeMicros();
        if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf(" : %lld %lld\n", (long long int)(tm1 - tm0), (long long int)(tm2 - tm1));
        // Keep the best of the two runs for this candidate.
        if ((tm1 - tm0) < besttm) { bestPath = i; besttm = tm1 - tm0; }
        if ((tm2 - tm1) < besttm) { bestPath = i; besttm = tm2 - tm1; }
      }
    }
    // Commit the winning candidate into the plan.
    for(int level = p->log2len, j=0;level >= 1;j++) {
      assert(pos2level(q->path[bestPath][j]) == level);
      int N = pos2N(q->path[bestPath][j]);
      int config = pos2config(q->path[bestPath][j]) & ~1;
      uint64_t t0 = q->p->tm[config | 0][level*(MAXBUTWIDTH+1) + N];
      uint64_t t1 = q->p->tm[config | 1][level*(MAXBUTWIDTH+1) + N];
      config = t0 < t1 ? config : (config | 1);
      p->bestPath[level] = N;
      p->bestPathConfig[level] = config;
      level -= N;
    }
    if (d2 != NULL) Sleef_free(d2);
    if (s2 != NULL) Sleef_free(s2);
  } else {
    // Estimate-only mode: take the cheapest path (index 0) as-is.
    for(int level = p->log2len, j=0;level >= 1;j++) {
      int bestPath = 0;
      assert(pos2level(q->path[bestPath][j]) == level);
      int N = pos2N(q->path[bestPath][j]);
      int config = pos2config(q->path[bestPath][j]);
      p->bestPath[level] = N;
      p->bestPathConfig[level] = config;
      level -= N;
    }
  }
  ksDispose(q);
}

//

// Synthetic per-stage cost used when real measurement is disabled: favors
// N == 3 and halves the cost of MT configs for large transforms.
static uint64_t estimate(int log2len, int level, int N, int config) {
  uint64_t ret = N * 1000 + ABS(N-3) * 1000;
  if (log2len >= 14 && (config & CONFIG_MT) != 0) ret /= 2;
  return ret;
}

// Times every usable (config, level, N) kernel combination into p->tm,
// keeping the minimum over MEASURE_REPEAT repetitions.
static void measureBut(SleefDFT *p) {
  if (p->x0 == NULL) return;
  //
#ifdef _OPENMP
  const int tn = omp_get_thread_num();
#else
  const int tn = 0;
#endif
  // Zeroed scratch buffers as kernel input/output.
  real *s = (real *)memset(p->x0[tn], 0, sizeof(real) * (2 << p->log2len));
  real *d = (real *)memset(p->x1[tn], 0, sizeof(real) * (2 << p->log2len));
  const int niter = 1 + 100000 / ((1 << p->log2len) + 1);
#define MEASURE_REPEAT 4
  for(int rep=1;rep<=MEASURE_REPEAT;rep++) {
    for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
      if ((config & 1) != 0) continue;
#endif
      if ((p->mode2 & SLEEF_MODE2_MT1D) == 0 && (config & CONFIG_MT) != 0)
        continue; // (continued) skip MT configs when 1-D multithreading is off
      for(uint32_t level = p->log2len;level >= 1;level--) {
        for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
          if (level < N || p->log2len <= N) continue;
          if (level == N) {
            // Bottom-level kernel: no permutation table needed.
            if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
            uint64_t tm = Sleef_currentTimeMicros();
            for(int i=0;i<niter*2;i++) {
              dispatch(p, N, d, s, level, config);
            }
            tm = Sleef_currentTimeMicros() - tm + 1;
            p->tm[config][level*(MAXBUTWIDTH+1)+N] = MIN(p->tm[config][level*(MAXBUTWIDTH+1)+N], tm);
          } else if (level == p->log2len) {
            if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
            if (p->vecwidth > (1 << N)) continue;
            // Build the permutation table the top-level kernel consumes.
            if ((config & CONFIG_MT) != 0) {
              int i1;
#ifdef _OPENMP
#pragma omp parallel for
#endif
              for(i1=0;i1 < (1 << (p->log2len-N-p->log2vecwidth));i1++) {
                int i0 = i1 << p->log2vecwidth;
                p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
              }
            } else {
              for(int i0=0, i1=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) {
                p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
              }
            }
            // Time a forward+back pair so source and destination both get touched.
            uint64_t tm = Sleef_currentTimeMicros();
            for(int i=0;i<niter;i++) {
              dispatch(p, N, d, s, level, config);
              dispatch(p, N, s, d, level, config);
            }
            tm = Sleef_currentTimeMicros() - tm + 1;
            p->tm[config][level*(MAXBUTWIDTH+1)+N] = MIN(p->tm[config][level*(MAXBUTWIDTH+1)+N], tm);
          } else {
            if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
            if (p->vecwidth > 2 && p->log2len <= N+2) continue;
            if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
            // Middle-level kernels need the same permutation table.
            if ((config & CONFIG_MT) != 0) {
              int i1;
#ifdef _OPENMP
#pragma omp parallel for
#endif
              for(i1=0;i1 < (1 << (p->log2len-N-p->log2vecwidth));i1++) {
                int i0 = i1 << p->log2vecwidth;
                p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
              }
            } else {
              for(int i0=0, i1=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) {
                p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
              }
            }
            uint64_t tm = Sleef_currentTimeMicros();
            for(int i=0;i<niter;i++) {
              dispatch(p, N, d, s, level, config); // (continued) timed forward+back pair
              dispatch(p, N, s, d, level, config);
            }
            tm = Sleef_currentTimeMicros() - tm + 1;
            p->tm[config][level*(MAXBUTWIDTH+1)+N] = MIN(p->tm[config][level*(MAXBUTWIDTH+1)+N], tm);
          }
        }
      }
    }
  }
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    // Dump the timing table: one line per usable (level, N), one column per
    // config; 1<<60 means "never measured / unusable".
    for(uint32_t level = p->log2len;level >= 1;level--) {
      for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
        if (level < N || p->log2len <= N) continue;
        if (level == N) {
          if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
          printf("bot %d, %d, %d, ", p->log2len, level, N);
          for(int config=0;config<CONFIGMAX;config++) {
            if (p->tm[config][level*(MAXBUTWIDTH+1)+N] == 1ULL << 60) {
              printf("N/A, ");
            } else {
              printf("%lld, ", (long long int)p->tm[config][level*(MAXBUTWIDTH+1)+N]);
            }
          }
          printf("\n");
        } else if (level == p->log2len) {
          if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
          if (p->vecwidth > (1 << N)) continue;
          printf("top %d, %d, %d, ", p->log2len, level, N);
          for(int config=0;config<CONFIGMAX;config++) {
            if (p->tm[config][level*(MAXBUTWIDTH+1)+N] == 1ULL << 60) {
              printf("N/A, ");
            } else {
              printf("%lld, ", (long long int)p->tm[config][level*(MAXBUTWIDTH+1)+N]);
            }
          }
          printf("\n");
        } else {
          if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
          if (p->vecwidth > 2 && p->log2len <= N+2) continue;
          if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
          printf("mid %d, %d, %d, ", p->log2len, level, N);
          for(int config=0;config<CONFIGMAX;config++) {
            if (p->tm[config][level*(MAXBUTWIDTH+1)+N] == 1ULL << 60) {
              printf("N/A, ");
            } else {
              printf("%lld, ", (long long int)p->tm[config][level*(MAXBUTWIDTH+1)+N]);
            }
          }
          printf("\n");
        }
      }
    }
  }
}

// Fills p->tm with synthetic estimate() costs instead of measuring, using the
// same usability filters as measureBut().
static void estimateBut(SleefDFT *p) {
  for(uint32_t level = p->log2len;level >= 1;level--) {
    for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
      if (level < N || p->log2len <= N) continue;
      if (level == N) {
        if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
        for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
          if ((config & 1) != 0) continue;
#endif
          p->tm[config][level*(MAXBUTWIDTH+1)+N] = estimate(p->log2len, level, N, config); // (continued) bottom-level estimate
        }
      } else if (level == p->log2len) {
        if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
        if (p->vecwidth > (1 << N)) continue;
        for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
          if ((config & 1) != 0) continue;
#endif
          p->tm[config][level*(MAXBUTWIDTH+1)+N] = estimate(p->log2len, level, N, config);
        }
      } else {
        if (p->tbl[N] == NULL || p->tbl[N][level] == NULL) continue;
        if (p->vecwidth > 2 && p->log2len <= N+2) continue;
        if ((int)p->log2len - (int)level < p->log2vecwidth) continue;
        for(int config=0;config<CONFIGMAX;config++) {
#if ENABLE_STREAM == 0
          if ((config & 1) != 0) continue;
#endif
          p->tm[config][level*(MAXBUTWIDTH+1)+N] = estimate(p->log2len, level, N, config);
        }
      }
    }
  }
}

// Top-level planning entry point: loads a saved plan if available, otherwise
// measures (or estimates) stage costs and searches for the best decomposition
// path.  `randomize` switches to the random debug planner.  Returns 1 on
// success, 0 when no executable path exists.
static int measure(SleefDFT *p, int randomize) {
  if (p->log2len == 1) {
    // Trivial 2-point transform: a single fixed stage, nothing to plan.
    p->bestTime = 1ULL << 60;
    p->pathLen = 1;
    p->bestPath[1] = 1;
    return 1;
  }
  if (PlanManager_loadMeasurementResultsP(p, (p->mode & SLEEF_MODE_NO_MT) != 0 ?
        1 : 0)) { // (continued) a previously saved plan was loaded
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
      printf("Path(loaded) : ");
      for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
      printf("\n");
    }
    return 1;
  }
  int toBeSaved = 0;
  // Initialize every timing slot to "unmeasured" (1 << 60).
  for(uint32_t level = p->log2len;level >= 1;level--) {
    for(uint32_t N=1;N<=MAXBUTWIDTH;N++) {
      for(int config=0;config<CONFIGMAX;config++) {
        p->tm[config][level*(MAXBUTWIDTH+1)+N] = 1ULL << 60;
      }
    }
  }
  if (((p->mode & SLEEF_MODE_MEASURE) != 0 || (planFilePathSet && (p->mode & SLEEF_MODE_MEASUREBITS) == 0)) && !randomize) {
    measureBut(p);
    toBeSaved = 1;
  } else {
    estimateBut(p);
  }
  // A plan is executable only if some top-level width got a valid timing.
  int executable = 0;
  for(int i=1;i<=MAXBUTWIDTH && !executable;i++) {
    if (p->tm[0][p->log2len*(MAXBUTWIDTH+1)+i] < (1ULL << 60)) executable = 1;
  }
  if (!executable) return 0;
  p->bestTime = 1ULL << 60;
  p->bestPath[p->log2len] = 0;
  if (!randomize) {
    searchForBestPath(p);
  } else {
    // Debug: keep trying random paths until one completes or the budget runs out.
    int path[MAXLOG2LEN+1];
    int pathConfig[MAXLOG2LEN+1];
    for(int j = p->log2len;j >= 0;j--) path[j] = pathConfig[j] = 0;
    int nTrial = 100000;
    do {
      nTrial = searchForRandomPathRecurse(p, p->log2len, path, pathConfig, 0, nTrial);
    } while(p->bestTime == 1ULL << 60 && nTrial >= 0);
  }
  if (p->bestPath[p->log2len] == 0) return 0;
  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    printf("Path");
    if (randomize) printf("(random) :"); else if (toBeSaved) printf("(measured) :"); else printf("(estimated) :");
    for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
    printf("\n");
  }
  if (toBeSaved) {
    PlanManager_saveMeasurementResultsP(p, (p->mode & SLEEF_MODE_NO_MT) != 0 ?
          1 : 0); // (continued) persist the measured plan
  }
  return 1;
}

// Decides between single- and multi-threaded 2-D transpose by loading a saved
// result, estimating from problem size, or timing both variants.
static void measureTranspose(SleefDFT *p) {
  if (PlanManager_loadMeasurementResultsT(p)) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose NoMT(loaded): %lld\n", (long long int)p->tmNoMT);
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose MT(loaded): %lld\n", (long long int)p->tmMT);
    return;
  }
  if ((p->mode & SLEEF_MODE_MEASURE) == 0 && (!planFilePathSet || (p->mode & SLEEF_MODE_MEASUREBITS) != 0)) {
    // No measurement: guess by size — large matrices favor the MT variant.
    if (p->log2hlen + p->log2vlen >= 14) {
      p->tmNoMT = 20;
      p->tmMT = 10;
      if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose : selected MT(estimated)\n");
    } else {
      p->tmNoMT = 10;
      p->tmMT = 20;
      if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose : selected NoMT(estimated)\n");
    }
    return;
  }
  real *tBuf2 = (real *)Sleef_malloc(sizeof(real)*2*p->hlen*p->vlen);
  const int niter = 1 + 5000000 / (p->hlen * p->vlen + 1);
  uint64_t tm;
  // Time the single-threaded transpose (both orientations per iteration).
  tm = Sleef_currentTimeMicros();
  for(int i=0;i<niter;i++) {
    transpose(tBuf2, p->tBuf, p->log2hlen, p->log2vlen);
    transpose(tBuf2, p->tBuf, p->log2vlen, p->log2hlen);
  }
  p->tmNoMT = Sleef_currentTimeMicros() - tm + 1;
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose NoMT(measured): %lld\n", (long long int)p->tmNoMT);
#ifdef _OPENMP
  // Time the multi-threaded variant the same way.
  tm = Sleef_currentTimeMicros();
  for(int i=0;i<niter;i++) {
    transposeMT(tBuf2, p->tBuf, p->log2hlen, p->log2vlen);
    transposeMT(tBuf2, p->tBuf, p->log2vlen, p->log2hlen);
  }
  p->tmMT = Sleef_currentTimeMicros() - tm + 1;
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("transpose MT(measured): %lld\n", (long long int)p->tmMT);
#else
  // Without OpenMP, make MT look strictly worse so it is never selected.
  p->tmMT = p->tmNoMT*2;
#endif
  Sleef_free(tBuf2);
  PlanManager_saveMeasurementResultsT(p);
}

// Implementation of SleefDFT_*_init1d
//
// Builds a 1-D plan for an n-point transform: selects the best available ISA,
// allocates permutation/coefficient tables and scratch buffers, and runs the
// planner.  Returns NULL when no suitable ISA exists.
EXPORT SleefDFT *INIT(uint32_t n, const real *in, real *out, uint64_t mode) {
  SleefDFT *p = (SleefDFT *)calloc(1, sizeof(SleefDFT));
  p->magic = MAGIC;
  p->baseTypeID = BASETYPEID;
  p->in = (const void *)in;
  p->out = (void *)out;

  // Mode
  p->mode = mode;
  if ((p->mode & SLEEF_MODE_NO_MT) == 0) {
    p->mode2 |=
      SLEEF_MODE2_MT1D; // (continued) enable 1-D multithreading unless NO_MT was set
  }
  // A real transform of size 2n is computed via an n-point complex transform.
  if ((mode & SLEEF_MODE_REAL) != 0) n /= 2;
  p->log2len = ilog2(n);
  if (p->log2len <= 1) return p;
  if ((mode & SLEEF_MODE_ALT) != 0) p->mode = mode = mode ^ SLEEF_MODE_BACKWARD;
#ifdef _OPENMP
  p->nThread = omp_thread_count();
#else
  p->nThread = 1;
  p->mode2 &= ~SLEEF_MODE2_MT1D;
#endif

  // ISA availability
  // Pick the available ISA with the highest DFT priority whose vector width
  // fits the problem size.
  int bestPriority = -1;
  p->isa = -1;
  for(int i=0;i<ISAMAX;i++) {
    if (checkISAAvailability(i) && bestPriority < (*GETINT[i])(GETINT_DFTPRIORITY) && n >= (*GETINT[i])(GETINT_VECWIDTH) * (*GETINT[i])(GETINT_VECWIDTH)) {
      bestPriority = (*GETINT[i])(GETINT_DFTPRIORITY);
      p->isa = i;
    }
  }
  if (p->isa == -1) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("ISA not available\n");
    p->magic = 0;
    free(p);
    return NULL;
  }

  // Tables
  p->perm = (uint32_t **)calloc(sizeof(uint32_t *), p->log2len+1);
  for(int level = p->log2len;level >= 1;level--) {
    p->perm[level] = (uint32_t *)Sleef_malloc(sizeof(uint32_t) * ((1 << p->log2len) + 8));
  }
  // Per-thread ping-pong scratch buffers.
  p->x0 = malloc(sizeof(real *) * p->nThread);
  p->x1 = malloc(sizeof(real *) * p->nThread);
  for(int i=0;i<p->nThread;i++) {
    p->x0[i] = (real *)Sleef_malloc(sizeof(real) * 2 * n);
    p->x1[i] = (real *)Sleef_malloc(sizeof(real) * 2 * n);
  }
  if ((mode & SLEEF_MODE_REAL) != 0) {
    // Coefficients for the real-transform pre/post-processing step; the sign
    // of the 0.5*sc.x term differs between forward and backward.
    p->rtCoef0 = (real *)Sleef_malloc(sizeof(real) * n);
    p->rtCoef1 = (real *)Sleef_malloc(sizeof(real) * n);
    if ((mode & SLEEF_MODE_BACKWARD) == 0) {
      for(uint32_t i=0;i<n/2;i++) {
        sc_t sc = SINCOSPI(i*((real)-1.0/n));
        ((real *)p->rtCoef0)[i*2+0] = ((real *)p->rtCoef0)[i*2+1] = (real)0.5 - (real)0.5 * sc.x;
        ((real *)p->rtCoef1)[i*2+0] = ((real *)p->rtCoef1)[i*2+1] = (real)0.5*sc.y;
      }
    } else {
      for(uint32_t i=0;i<n/2;i++) {
        sc_t sc = SINCOSPI(i*((real)-1.0/n));
        ((real *)p->rtCoef0)[i*2+0] = ((real *)p->rtCoef0)[i*2+1] = (real)0.5 + (real)0.5 * sc.x;
        ((real *)p->rtCoef1)[i*2+0] = ((real *)p->rtCoef1)[i*2+1] = (real)0.5*sc.y;
      }
    }
  }

  // Measure
  int sign = (mode & SLEEF_MODE_BACKWARD) != 0 ?
    -1 : 1; // (continued) coefficient sign follows the transform direction
  p->vecwidth = (*GETINT[p->isa])(GETINT_VECWIDTH);
  p->log2vecwidth = ilog2(p->vecwidth);
  for(int i=1;i<=MAXBUTWIDTH;i++) {
    ((real ***)p->tbl)[i] = makeTable(sign, p->vecwidth, p->log2len, i, constK[i]);
  }
  if (!measure(p, (mode & SLEEF_MODE_DEBUG))) {
    // Fall back to the first ISA
    freeTables(p);
    p->isa = 0;
    p->vecwidth = (*GETINT[p->isa])(GETINT_VECWIDTH);
    p->log2vecwidth = ilog2(p->vecwidth);
    for(int i=1;i<=MAXBUTWIDTH;i++) {
      ((real ***)p->tbl)[i] = makeTable(sign, p->vecwidth, p->log2len, i, constK[i]);
    }
    // Rebuild the per-level permutation tables for the chosen path.
    for(int level = p->log2len;level >= 1;) {
      int N = ABS(p->bestPath[level]);
      if (level == N) { level -= N; continue; }
      int i1 = 0;
      for(int i0=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) {
        p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
      }
      for(;i1 < (1 << p->log2len) + 8;i1++) p->perm[level][i1] = 0;
      level -= N;
    }
    if (!measure(p, (mode & SLEEF_MODE_DEBUG))) {
      if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Suitable ISA not found. This should not happen.\n");
      return NULL;
    }
  }
  // Final permutation tables for the planned path (same construction as above).
  for(int level = p->log2len;level >= 1;) {
    int N = ABS(p->bestPath[level]);
    if (level == N) { level -= N; continue; }
    int i1 = 0;
    for(int i0=0;i0 < (1 << (p->log2len-N));i0+=p->vecwidth, i1++) {
      p->perm[level][i1] = 2*perm(p->log2len, i0, p->log2len-level, p->log2len-(level-N));
    }
    for(;i1 < (1 << p->log2len) + 8;i1++) p->perm[level][i1] = 0;
    level -= N;
  }
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("ISA : %s %d bit %s\n", (char *)(*GETPTR[p->isa])(0), (int)(GETINT[p->isa](GETINT_VECWIDTH) * sizeof(real) * 16), BASETYPESTRING);
  return p;
}

// Implementation of SleefDFT_*_init2d
//
// Builds a 2-D plan as two 1-D plans (rows and columns; shared when the
// matrix is square) plus a transpose buffer.  The inner 1-D plans always run
// single-threaded; 2-D multithreading is handled at this level.
EXPORT SleefDFT *INIT2D(uint32_t vlen, uint32_t hlen, const real *in, real *out, uint64_t mode) {
  SleefDFT *p = (SleefDFT *)calloc(1, sizeof(SleefDFT));
  p->magic = MAGIC2D;
  p->mode = mode;
  p->baseTypeID = BASETYPEID;
  p->in = in;
  p->out = out;
  p->hlen = hlen;
  p->log2hlen = ilog2(hlen);
  p->vlen = vlen;
  p->log2vlen = ilog2(vlen);
  uint64_t mode1D = mode;
  mode1D |=
    SLEEF_MODE_NO_MT; // (continued) inner 1-D plans run single-threaded
  if ((mode & SLEEF_MODE_NO_MT) == 0) p->mode3 |= SLEEF_MODE3_MT2D;
  p->instH = p->instV = INIT(hlen, NULL, NULL, mode1D);
  if (hlen != vlen) p->instV = INIT(vlen, NULL, NULL, mode1D);
  p->tBuf = (void *)Sleef_malloc(sizeof(real)*2*hlen*vlen);
  measureTranspose(p);
  return p;
}

// Implementation of SleefDFT_*_execute
//
// Executes a planned transform.  2-D plans run rows, transpose, columns,
// transpose.  Tiny 1-D plans (log2len <= 1) are computed inline; everything
// else walks the planned path through dispatch() with ping-pong buffers.
// s0/d0 of NULL fall back to the buffers given at plan creation.
EXPORT void EXECUTE(SleefDFT *p, const real *s0, real *d0) {
  assert(p != NULL && (p->magic == MAGIC || p->magic == MAGIC2D));
  const real *s = s0 == NULL ? p->in : s0;
  real *d = d0 == NULL ? p->out : d0;
  if (p->magic == MAGIC2D) {
    // S -> T -> D -> T -> D
    real *tBuf = (real *)(p->tBuf);
#ifdef _OPENMP
    // Use the MT path when it was measured faster (or at random in debug mode).
    if ((p->mode3 & SLEEF_MODE3_MT2D) != 0 && (((p->mode & SLEEF_MODE_DEBUG) == 0 && p->tmMT < p->tmNoMT) || ((p->mode & SLEEF_MODE_DEBUG) != 0 && (rand() & 1)))) {
      int y;
#pragma omp parallel for
      for(y=0;y<p->vlen;y++) {
        EXECUTE(p->instH, &s[p->hlen*2*y], &tBuf[p->hlen*2*y]);
      }
      transposeMT(d, tBuf, p->log2vlen, p->log2hlen);
#pragma omp parallel for
      for(y=0;y<p->hlen;y++) {
        EXECUTE(p->instV, &d[p->vlen*2*y], &tBuf[p->vlen*2*y]);
      }
      transposeMT(d, tBuf, p->log2hlen, p->log2vlen);
    } else
#endif
    {
      for(int y=0;y<p->vlen;y++) {
        EXECUTE(p->instH, &s[p->hlen*2*y], &tBuf[p->hlen*2*y]);
      }
      transpose(d, tBuf, p->log2vlen, p->log2hlen);
      for(int y=0;y<p->hlen;y++) {
        EXECUTE(p->instV, &d[p->vlen*2*y], &tBuf[p->vlen*2*y]);
      }
      transpose(d, tBuf, p->log2hlen, p->log2vlen);
    }
    return;
  }
  if (p->log2len <= 1) {
    // Tiny transforms computed directly, by mode combination.
    if ((p->mode & SLEEF_MODE_REAL) == 0) {
      real r0 = s[0] + s[2];
      real r1 = s[1] + s[3];
      real r2 = s[0] - s[2];
      real r3 = s[1] - s[3];
      d[0] = r0; d[1] = r1; d[2] = r2; d[3] = r3;
    } else {
      if ((p->mode & SLEEF_MODE_ALT) == 0) {
        if (p->log2len == 1) {
          if ((p->mode & SLEEF_MODE_BACKWARD) == 0) {
            real r0 = s[0] + s[2] + (s[1] + s[3]);
            real r1 = s[0] + s[2] - (s[1] + s[3]);
            real r2 = s[0] - s[2];
            real r3 = s[3] - s[1];
            d[0] = r0; d[1] = 0; d[2] = r2; d[3] = r3; d[4] = r1; d[5] = 0;
          } else {
            real r0 = (s[0] + s[4])*(real)0.5 + s[2];
            real r1 = (s[0] - s[4])*(real)0.5 -
              s[3]; // (continued) backward real 4-point case
            real r2 = (s[0] + s[4])*(real)0.5 - s[2];
            real r3 = (s[0] - s[4])*(real)0.5 + s[3];
            d[0] = r0*2; d[1] = r1*2; d[2] = r2*2; d[3] = r3*2;
          }
        } else {
          if ((p->mode & SLEEF_MODE_BACKWARD) == 0) {
            real r0 = s[0] + s[1];
            real r1 = s[0] - s[1];
            d[0] = r0; d[1] = 0; d[2] = r1; d[3] = 0;
          } else {
            real r0 = s[0] + s[2];
            real r1 = s[0] - s[2];
            d[0] = r0; d[1] = r1;
          }
        }
      } else {
        // ALT-format real transforms.
        if (p->log2len == 1) {
          if ((p->mode & SLEEF_MODE_BACKWARD) == 0) {
            real r0 = s[0] + s[2] + (s[1] + s[3]);
            real r1 = s[0] + s[2] - (s[1] + s[3]);
            real r2 = s[0] - s[2];
            real r3 = s[1] - s[3];
            d[0] = r0; d[1] = r1; d[2] = r2; d[3] = r3;
          } else {
            real r0 = (s[0] + s[1])*(real)0.5 + s[2];
            real r1 = (s[0] - s[1])*(real)0.5 + s[3];
            real r2 = (s[0] + s[1])*(real)0.5 - s[2];
            real r3 = (s[0] - s[1])*(real)0.5 - s[3];
            d[0] = r0; d[1] = r1; d[2] = r2; d[3] = r3;
          }
        } else {
          real c = ((p->mode & SLEEF_MODE_BACKWARD) != 0) ? (real)0.5 : (real)1.0;
          real r0 = s[0] + s[1];
          real r1 = s[0] - s[1];
          d[0] = r0 * c; d[1] = r1 * c;
        }
      }
    }
    return;
  }
  //
#ifdef _OPENMP
  const int tn = omp_get_thread_num();
  real *t[] = { p->x1[tn], p->x0[tn], d };
#else
  real *t[] = { p->x1[0], p->x0[0], d };
#endif
  const real *lb = s;
  // nb starts at -1 or 0 so the final dispatch() lands in d (t[2]) after an
  // even/odd number of ping-pong swaps.
  int nb = 0;
  if ((p->mode & SLEEF_MODE_REAL) != 0 && (p->pathLen & 1) == 0 && ((p->mode & SLEEF_MODE_BACKWARD) != 0) != ((p->mode & SLEEF_MODE_ALT) != 0)) nb = -1;
  if ((p->mode & SLEEF_MODE_REAL) == 0 && (p->pathLen & 1) == 1) nb = -1;
  if ((p->mode & SLEEF_MODE_REAL) != 0 && ((p->mode & SLEEF_MODE_BACKWARD) != 0) != ((p->mode & SLEEF_MODE_ALT) != 0)) {
    // Real-transform pre-processing before the butterfly passes.
    (*REALSUB1[p->isa])(t[nb+1], s, p->log2len, p->rtCoef0, p->rtCoef1, (p->mode & SLEEF_MODE_ALT) == 0);
    if ((p->mode & SLEEF_MODE_ALT) == 0) t[nb+1][(1 << p->log2len)+1] = -s[(1 << p->log2len)+1] * 2;
    lb = t[nb+1];
    nb = (nb + 1) & 1;
  }
  // Walk the planned path, one butterfly pass per step.
  for(int level = p->log2len;level >= 1;) {
    int N = ABS(p->bestPath[level]), config = p->bestPathConfig[level];
    dispatch(p, N, t[nb+1], lb, level, config);
    level -= N;
    lb = t[nb+1];
    nb = (nb + 1) & 1;
  }
  if ((p->mode & SLEEF_MODE_REAL) !=
      0 && ((p->mode & SLEEF_MODE_BACKWARD) == 0) != ((p->mode & SLEEF_MODE_ALT) != 0)) { // (continued) real-transform post-processing
    (*REALSUB0[p->isa])(d, lb, p->log2len, p->rtCoef0, p->rtCoef1);
    if ((p->mode & SLEEF_MODE_ALT) == 0) {
      // Fix up the packed spectrum layout: negate the imaginary part at the
      // Nyquist slot and move d[1] to the end of the array.
      d[(1 << p->log2len)+1] = -d[(1 << p->log2len)+1];
      d[(2 << p->log2len)+0] = d[1];
      d[(2 << p->log2len)+1] = 0;
      d[1] = 0;
    }
  }
}
AlgoritmoProyectoSDFicherosMPIfin.c
#include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> #include <math.h> #include <time.h> #include <string.h> #define FIL 1000 #define COL 24 #define TAG 0 float calculaMAPE(); int main(int argc, char **argv) { char nombreFicheroProcesado[] = "Datos_1x.txt"; //Escribir aquí nombre fichero que se quiere leer // ** Declaraci�n de variables char res[1001][192], filas[10]; int col = 24; float datos[641664]; MPI_Status stat; MPI_File mfile; // ** Inicio del entorno MPI MPI_Init(&argc, &argv); MPI_File_open(MPI_COMM_WORLD, "Datos_1x.txt", MPI_MODE_RDONLY, MPI_INFO_NULL, &mfile); int cont = 0; int cuenta = 192; int i = 0; MPI_File_read_at(mfile, 0, filas, 9, MPI_CHAR, MPI_STATUS_IGNORE); int fil = atoi(filas); int p = 1; for (int i = 0; i < 1001; i++) { //65.237.962(1000) //451.629(1)+cont//4.965.307(10)+cont//61.063.885(100)+cont MPI_File_read_at_all(mfile, 451629 + cont + (cuenta * cont), res[i], cuenta - 1, MPI_CHAR, MPI_STATUS_IGNORE); cont++; p++; } float dias[1001][24]; for (int s = 0; s < 1001; s++) { char *token = NULL; token = strtok(res[s], ","); while (token) { //printf("token:%s\n",token); dias[s][i] = atoi(token); token = strtok(NULL, ","); i++; } i = 0; } printf("\n filas:%d Usadas: %d-%d\n", fil, fil + 1 - 1000, fil + 1); int j, indiceDiaMAPEMasBajo = 0, k = 2, spid; //Última línea float ultDia[FIL], diaActual[FIL], vectorMAPESRealizados[FIL], mediaAritmeticaVecinos[FIL]; int pid = 0, prn, splitSize, restSize, rows = 0, offset = 0; //Calculo porcentaje MAPE float MAPEActual = 0, MAPEGeneral = 500.00; //MAPEGeneral se puede ajustar, dependiendo del valor que se le ponga, saldrán unos resultados u otros // ** Obtención del PID MPI_Comm_rank(MPI_COMM_WORLD, &pid); // ** Obtención del número de PRN MPI_Comm_size(MPI_COMM_WORLD, &prn); clock_t begin = clock(); //Empiezo a calcular el tiempo de ejecucción if (pid == 0) { //hilo maestro printf("\n[%d]: Volcando en un vector, los datos del último día", pid); //Paso el último día a un vector 
propio for (j = 0; j < COL; j++) { ultDia[j] = dias[999][j]; printf("%f", ultDia[j]); } //** Envío a los esclavos: for (spid = 1; spid <= prn - 1; spid++) { MPI_Send(&ultDia, COL, MPI_FLOAT, spid, 0, MPI_COMM_WORLD); MPI_Send(&diaActual, COL, MPI_FLOAT, spid, 0, MPI_COMM_WORLD); MPI_Send(&dias, FIL * COL, MPI_FLOAT, spid, 0, MPI_COMM_WORLD); } //** Recepción de resultados for (spid = 1; spid <= prn - 1; spid++) { MPI_Recv(&vectorMAPESRealizados,COL , MPI_FLOAT, spid, 0, MPI_COMM_WORLD, NULL); MPI_Recv(&mediaAritmeticaVecinos, COL, MPI_FLOAT, spid, 0, MPI_COMM_WORLD, NULL); } //** Impresión printf("\nPredicciones para el día actual"); for (j = 0; j < COL; j++) { printf("\n Hora: %d --> %f", j + 1, mediaAritmeticaVecinos[j]); } clock_t end = clock(); //Paro el tiempo de ejecucción double tiempo = (double)(end - begin) / CLOCKS_PER_SEC; //FICHERO TIEMPO printf("Procediendo a crear el fichero tiempo"); FILE *fichero; fichero = fopen("tiempos.txt", "w"); fprintf(fichero,"%s","\n Nombre fichero:"); fprintf(fichero,"%s",nombreFicheroProcesado); fprintf(fichero,"%s","\nTiempo de ejecucción programa(s):"); fprintf(fichero,"%f",tiempo); fclose(fichero); printf("\nFichero de tiempo creado con éxito"); //FICHERO PREDICCIONES printf("Procediendo a crear el fichero prediciones"); int j; FILE *fichero1; fichero1 = fopen("predicciones.txt", "w"); for (j = 0; j < COL; j++) { fprintf(fichero1,"%s%d%s","\n Prediccion",j+1,":"); fprintf(fichero1,"%f",mediaAritmeticaVecinos[j]); } fclose(fichero1); printf("\nFichero de predicciones creado con éxito"); //FICHERO MAPE printf("Procediendo a crear el fichero mape"); FILE *fichero2; fichero2 = fopen("mape.txt", "w"); for (j = 0; j < sizeof(vectorMAPESRealizados); j++) { fprintf(fichero2,"%s%d%s","\nMape de la fila",j+1,":"); fprintf(fichero2,"%f",vectorMAPESRealizados[j]); } fclose(fichero2); printf("\nFichero de mape creado con éxito"); //Limpiamos memoria fflush(stdout); fflush(stdin); } else { //hilo esclavo //** Recepción de resultados 
MPI_Recv(&ultDia, COL, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, NULL); MPI_Recv(&diaActual, COL, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, NULL); MPI_Recv(&dias, FIL * COL, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, NULL); // ** REGION PARALELA. Cálculo del vector vectorMAPE en paralelo con 4 hilos #pragma omp parallel for num_threads(4) for (i = 0; i < FIL; i++) { for (j = 0; j < COL; j++) { diaActual[j] = dias[i][j]; } MAPEActual = calculaMAPE(diaActual, ultDia); vectorMAPESRealizados[i] = MAPEActual; printf("PID: [%d]/[%d] calculandoo el día [%d] con mape:%f\n", pid, omp_get_thread_num(),i,MAPEActual); if (MAPEActual < MAPEGeneral) { MAPEGeneral = MAPEActual; indiceDiaMAPEMasBajo = i; } } //Calculo la media aritmética de los dos días siguientes al del MAPE más bajo for (j = 0; j < FIL; j++) { mediaAritmeticaVecinos[j] = (dias[indiceDiaMAPEMasBajo + 1][j] + dias[indiceDiaMAPEMasBajo + 2][j]) / 2; } //** Envío al maestro: MPI_Send(&mediaAritmeticaVecinos, COL, MPI_FLOAT, 0, 0, MPI_COMM_WORLD); MPI_Send(&vectorMAPESRealizados, COL, MPI_FLOAT, 0, 0, MPI_COMM_WORLD); } // ** Finalización del entorno MPI MPI_Finalize(); return 0; //FIN } float calculaMAPE(float diaActual[], float ultDia[]) { float MAPEActual = 0; for (int j = 0; j < 24; j++) { MAPEActual += (fabs((ultDia[j] - diaActual[j]) / ultDia[j])); } MAPEActual *= 100 / 24; MAPEActual *= 100; //Para calcular el porcentaje return MAPEActual; }
metadirective.c
/*
 * OpenMP 5.x `metadirective` conformance checks.  Each check offloads to the
 * default device, resolves a metadirective against a context selector
 * (device kind/arch/isa, implementation vendor/extension, user condition,
 * scoring) and verifies the observed thread count: GPU_THREAD_COUNT when the
 * `parallel` variant was selected on a GPU, 1 when the `single` default won.
 */
#include <stdio.h>
#include <omp.h>

#define N 10
#define GPU_THREAD_COUNT 256

/* device={kind(gpu)} must match on a GPU target -> parallel variant chosen. */
int check_device_kind_gpu_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(gpu)}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: device_kind_gpu_selector\n");
    return 0;
  }
  return 1;
}

/* device={kind(cpu, host)} must NOT match on a GPU target -> single default. */
int check_device_kind_cpu_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(cpu, host)}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 1) {
    printf("Failed metadirective: device_kind_cpu_selector\n");
    return 0;
  }
  return 1;
}

/* device={arch("amdgcn")} matches the AMD GPU architecture -> parallel. */
int check_device_arch_amdgcn_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {arch("amdgcn")}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: device_arch_amdgcn_selector\n");
    return 0;
  }
  return 1;
}

/* device={arch("x86_64")} does not match the GPU -> single default. */
int check_device_arch_x86_64_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {arch("x86_64")}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 1) {
    printf("Failed metadirective: device_arch_x86_64_selector\n");
    return 0;
  }
  return 1;
}

/* device={isa(...)} matches a device ISA feature string -> parallel. */
int check_device_isa_feature_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {isa("flat-address-space")}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: device_isa_feature_selector\n");
    return 0;
  }
  return 1;
}

/* implementation={vendor(amd)} matches the AMD implementation -> parallel. */
int check_implementation_vendor_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(implementation = {vendor(amd)}: parallel) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: implementation_vendor_selector\n");
    return 0;
  }
  return 1;
}

/* Two matching when-clauses: the one with the higher score(...) must win,
   so at most 8 threads are expected (score 100 beats score 20). */
int check_scoring() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(implementation = {vendor(score(20): amd)}: parallel num_threads(4))\
    when(implementation = {vendor(score(100): amd)}: parallel num_threads(8))\
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount > 8) {
    printf("Failed metadirective: scoring\n");
    return 0;
  }
  return 1;
}

/* extension(match_any): the when-clause applies if ANY selector matches;
   arch("amdgcn") matches even though kind(cpu) does not -> parallel. */
int check_extension_match_any() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(cpu), arch("amdgcn")}, \
         implementation = {extension(match_any)} \
         : parallel)\
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != GPU_THREAD_COUNT) {
    printf("Failed metadirective: check_extension_match_any\n");
    return 0;
  }
  return 1;
}

/* extension(match_all): ALL selectors must match; kind(cpu) fails on the
   GPU, so the default single variant is chosen. */
int check_extension_match_all() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(device = {kind(cpu), arch("amdgcn")}, \
         implementation = {extension(match_all)} \
         : parallel)\
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount != 1) {
    printf("Failed metadirective: check_extension_match_all\n");
    return 0;
  }
  return 1;
}

/* user={condition(...)} on a compile-time-true expression (N > 5) selects
   the parallel variant with at most 4 threads. */
int check_static_condition_selector() {
  int threadCount = 0;

#pragma omp target map(tofrom: threadCount)
  {
#pragma omp metadirective \
    when(user = {condition(N > 5)}: parallel num_threads(4)) \
    default(single)
    threadCount = omp_get_num_threads();
  }

  if (threadCount > 4) {
    printf("Failed metadirective: static_condition_selector\n");
    return 0;
  }
  return 1;
}

/* Runs every check; returns -1 on the first reported failure. */
int main(void) {
  if (!check_device_kind_gpu_selector() ||
      !check_device_kind_cpu_selector() ||
      !check_device_arch_amdgcn_selector() ||
      !check_device_arch_x86_64_selector() ||
      !check_device_isa_feature_selector() ||
      !check_implementation_vendor_selector() ||
      !check_scoring() ||
      !check_extension_match_any() ||
      !check_extension_match_all() ||
      !check_static_condition_selector()) {
    return -1;
  }

  printf("Success\n");
  return 0;
}
main.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> void oddEvenSort (int *a, int N); int main() { int N, i; printf("Inserire il numero degli elementi del vettore da ordinare: "); scanf("%d", &N); int *a = (int *)malloc(N*sizeof(int)); for(i = 0; i < N; i++) { printf("Inserire elemento [%d] nel vettore: ", i); scanf("%d", &a[i]); } oddEvenSort(a,N); printf("\nVettore ordinato: "); for(i = 0; i < N; i++) { printf("%d\t", a[i]); } return 0; } void oddEvenSort (int *a, int N) { int sw1 = 1, start = 0, i; int temp; while(sw1 || start) { sw1 = 0; #pragma omp parallel for private(temp) for(i = start; i < N - 1; i += 2) //Ciclo for sugli elementi di indice pari. { if(a[i] > a[i+1]) { temp = a[i]; a[i] = a[i+1]; a[i+1] = temp; sw1 = 1; } } if(start == 0) { start = 1; } else start = 0; } }
master-worker-omp.c
# include <stdio.h> # include <stdlib.h> # include <omp.h> struct { int maxtask; int task; } taskinfo = { 0, 0 }; void work(void); void get_task(int *nexttask); int main(int argc, char *argv[]) { taskinfo.maxtask = 40; #pragma omp parallel work(); return 0; } void get_task(int *nexttask) { #pragma omp critical { if (taskinfo.task < taskinfo.maxtask) { ++taskinfo.task; *nexttask = taskinfo.task; } else { *nexttask = -1; } } } void work(void) { int task; do { get_task(&task); if (task >= 0) { printf("thread %d: working on task %d\n", (int) omp_get_thread_num(), task); system("sleep 1"); } } while (task >= 0); }
Marginals.h
/* +---------------------------------+ | | | *** Marginals calculation *** | | | | Copyright (c) -tHE SWINe- 2013 | | | | Marginals.h | | | +---------------------------------+ */ #pragma once #ifndef __MARGINALS_INCLUDED #define __MARGINALS_INCLUDED /** * @file include/slam/Marginals.h * @date 2013 * @author -tHE SWINe- * @brief calculation of marginal covariances in sparse systems */ #include "slam/BlockMatrix.h" //#include "slam/Timer.h" // included from slam/BlockMatrix.h //#include "slam/Integer.h" // included from slam/BlockMatrix.h #include "slam/IncrementalPolicy.h" // block matrix part names #include "eigen/Eigen/Core" #include "slam/OrderingMagic.h" /** \addtogroup covs * @{ */ /** * @def __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP * @brief if defined, dense lookup is used to get blocks of the marginals * @note This quickly runs out of memory (100k would not fit), and it is * slower for larger problems than without it (10k with 0.49 sec, * without 0.24 sec). */ //#define __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP /** * @def __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP * @brief if defined, blockwise diagonal loop is used instead of elementwise * * with block diag: 0.19 sec 10k, 0.31 sphere, 4.9 sec 100k, 0.163 msec simple example * without: 0.21 sec 10k, 0.33 sphere, 5.3 sec 100k, 0.204 msec simple example * * @note This involves some extra calculation but saves memory traffic, * the payoff will likely diminissh with growing block size. */ #define __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP /** * @def __MARGINALS_COMPACT_UPDATE * @brief if defined, \ref CMarginals::Update_BlockDiagonalMarginals_FBS_ExOmega() will * use less memory in exchange for potentially unaligned operations */ #define __MARGINALS_COMPACT_UPDATE /** * @brief prototype marginal covariance methods implementation * * @todo (Re)implement non-FBS version of the fast recurrent. 
* @todo Implement parallel calculation of more denser matrices or * on-demand calculation of off-diagonal blocks using recurrent (rather simple). */ class CMarginals { protected: public: /** * @brief reference function that calculates dense marginals matrix * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_R is the Cholesky factor * * @note This function throws std::bad_alloc. */ static void Calculate_DenseMarginals_Ref(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R) // throw(std::bad_alloc) { const size_t n = r_R.n_Column_Num(); // in elements r_marginals.resize(n, n); Eigen::MatrixXd &R_inv = r_marginals; // R_inv = S for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) { double *p_column = &R_inv.col(i)(0); memset(p_column, 0, n * sizeof(double)); p_column[i] = 1; if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col); r_R.UpperTriangular_Solve(p_column, n, n_block_col); // backsub, only the nonzero part of the column (started at (block) column which contains column i, with no loss of generality) } r_marginals = R_inv * R_inv.transpose(); // C = SS^T, might need some memory // calculate the covariance (assume that this is correct) // 2015-08-17 - seems to work ok } /** * @brief slow function that calculates dense marginals matrix * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_R is the Cholesky factor * * @note This function throws std::bad_alloc. */ static void Calculate_DenseMarginals_Slow(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R) // throw(std::bad_alloc) { const size_t n = r_R.n_Column_Num(); // in elements r_marginals.resize(n, n); r_marginals.setZero(); // !! 
Eigen::MatrixXd R_inv_column(n, 1); // R_inv = S double *p_column = &R_inv_column.col(0)(0); // get dense column data from the Eigen matrix (actually only need one) for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) { memset(p_column, 0, n * sizeof(double)); p_column[i] = 1; // make a column vector with a single 1 in it if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col); // if done before, it avoids both referencing block col 0 in an empty matrix // and determining no. of columns in one past the last block column at the end size_t UNUSED(n_block_column_size); size_t n_block_column = r_R.n_Find_BlockColumn(i, n_block_column_size); _ASSERTE(n_block_col == n_block_column); // should be the same // get which block column contains column i (optimize this away, probably need to use it when resuming) //_ASSERTE(n_block_column_size <= n_diag_band_width); // make this into a run-time check in the production code // make sure it is not longer than the diagonal (otherwise we will not have enough backlog to calculate all the off-diagonal elements) r_R.UpperTriangular_Solve(p_column, n, n_block_col); // backsub, only the nonzero part of the column (started at (block) column which contains column i, with no loss of generality) // this seems to be O(i) divisions + O(nnz) MADs in the given (block) column range // that sums up to O(n^2/2) divisions + O(nnz log(nnz))?? MADs ... some quadratute anyways std::vector<double> backsub_test(n, 0); backsub_test[i] = 1; // !! 
r_R.UpperTriangular_Solve(&backsub_test[0], n); // full backsub _ASSERTE(!memcmp(p_column, &backsub_test[0], n * sizeof(double))); // make sure that the result is correct _ASSERTE((Eigen::Map<Eigen::VectorXd, Eigen::Unaligned>(p_column + i + 1, n - i - 1).norm() == 0)); // double pars required because of the comma in Map params // everything below i is zero (true) for(size_t k = 0; k <= i; ++ k) { for(size_t j = 0; j <= i; ++ j) r_marginals(j, k) += p_column[j] * p_column[k]; // it is symmetric, indexing arbitrary } // accumulate the entries of the covariace matrix. this is O(n^3/2) MADs for the full matrix // note that to calculate even only the diagonal, we need full columns } } /** * @brief fast function that calculates a column band of the dense marginals matrix * * @param[out] r_marginals is the marginals matrix * @param[in] r_R is the Cholesky factor * @param[in] n_start_column is zero-based index of the first column (in elements) * @param[in] n_end_column is zero-based index of one past the last column (in elements) * @param[in] p_inv_order is inverse ordering on the R factor * @param[in] n_order_size is size of the ordering (must match number of block columns in R) * @param[in] b_lower_diag_only is lower-diagonal flag * @param[in] b_band_only is band flag (only the specified band of the matrix is then stored) * * @note This function throws std::bad_alloc. 
*/ static void Calculate_DenseMarginals_Fast_ColumnBand(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R, size_t n_start_column, size_t n_end_column, const size_t *p_inv_order, size_t n_order_size, bool b_lower_diag_only = false, bool b_band_only = false) // throw(std::bad_alloc) { _ASSERTE(!b_lower_diag_only); // missing code to correctly resume, always calculating full _ASSERTE(p_inv_order); _ASSERTE(n_order_size == r_R.n_BlockColumn_Num()); // make sure the ordering is there and has valid size const size_t n = r_R.n_Column_Num(); // in elements _ASSERTE(n_start_column <= n_end_column); _ASSERTE(n_end_column <= n); // not more than that r_marginals.resize(n, (b_band_only)? n_end_column - n_start_column : n); // should already have the size if called from Calculate_DenseMarginals_Fast(), then it is a no-op Eigen::VectorXd perm_column(n); size_t n_block_col = -1, n_col_remains = 1; const size_t nb = r_R.n_BlockColumn_Num(); if(n_start_column) { /*n_block_col = r_R.n_Find_BlockColumn(n_start_column, n_col_remains); _ASSERTE(n_block_col != size_t(-1)); // get a column size_t n_col_base = r_R.n_BlockColumn_Base(n_block_col); _ASSERTE(n_col_base <= n_start_column); // make sure that the right column was found _ASSERTE(n_col_remains > n_start_column - n_col_base); // make sure that the start column is inside the block column n_col_remains -= n_start_column - n_col_base; // get how many columns remain*/ // this seems flawed, can't ignore the ordering if the blocks are not the same size //size_t n_col_base; for(size_t j = 0, n_one_pos = n_start_column; j < n_order_size; ++ j) { size_t n_col = p_inv_order[j]; size_t n_block_base = r_R.n_BlockColumn_Base(n_col); size_t n_block_size = r_R.n_BlockColumn_Column_Num(n_col); if(n_one_pos < n_block_size) { //n_col_base = n_block_base; // unused n_col_remains = n_block_size /*- 1*/ - n_one_pos; // like this, without the "- 1" n_block_col = j; // before permutation! 
break; } else n_one_pos -= n_block_size; } _ASSERTE(n_block_col != size_t(-1)); // need to find the correct column under the permutation ++ n_col_remains; // will decrement at the beginning of the loop, compensate for that } for(size_t i = n_start_column, _n = std::min(n, n_end_column); i < _n; ++ i) { if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width n_col_remains = r_R.n_BlockColumn_Column_Num(p_inv_order[++ n_block_col]); // can't ignore the order here! // if done before, it avoids both referencing block col 0 in an empty matrix // and determining no. of columns in one past the last block column at the end _ASSERTE(n_block_col < nb); //_ASSERTE(n_block_col + 1 == nb || i + n_col_remains == r_R.n_BlockColumn_Base(n_block_col + 1)); // will only work with a single block size // make sure that the numbers are correct double *p_column = &r_marginals.col((b_band_only)? i - n_start_column : i)(0); #ifdef _DEBUG memset(p_column, 0, n * sizeof(double)); p_column[i] = 1; // make a column vector with a single 1 in it r_R.InversePermute_LeftHandSide_Vector(&perm_column(0), p_column, n, p_inv_order, n_order_size); // this is mostly a waste of time, as the vector is mostly zeros #endif // _DEBUG size_t p = p_inv_order[n_block_col]; // the block column is the one we want size_t n_perm_col = (p + 1 < nb)? 
r_R.n_BlockColumn_Base(p + 1) - n_col_remains : // quite simple r_R.n_BlockColumn_Base(p) + r_R.n_BlockColumn_Column_Num(p) - n_col_remains; // one more reference _ASSERTE(perm_column(n_perm_col) == 1); // all we need to do is write a single one here // calculate inverse permutation #ifndef _DEBUG perm_column.setZero(); perm_column(n_perm_col) = 1; // form the vector with a single 1 in it already permuted #endif // !_DEBUG r_R.UpperTriangularTranspose_Solve(&perm_column(0), n, p/*n_block_col*/); /*if(b_lower_diag_only) r_R.UpperTriangular_Solve(&perm_column(0), n, n_block_col, r_R.n_BlockColumn_Num() - 1); else*/ r_R.UpperTriangular_Solve(&perm_column(0), n); r_R.Permute_LeftHandSide_Vector(p_column, &perm_column(0), n, p_inv_order, n_order_size); // solve for the whole column thing, generates one column at a time } // 2015-08-17 - seems to work ok } /** * @brief fast FBS function that calculates dense marginals matrix * * @tparam CBlockMatrixTypelist is list of Eigen::Matrix block sizes * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_R is the Cholesky factor * @param[in] n_start_column is zero-based index of the first column (in elements) * @param[in] n_end_column is zero-based index of one past the last column (in elements) * @param[in] p_inv_order is inverse ordering on the R factor * @param[in] n_order_size is size of the ordering (must match number of block columns in R) * @param[in] b_lower_diag_only is lower-diagonal flag * * @note This function throws std::bad_alloc. 
*/ template <class CBlockMatrixTypelist> static void Calculate_DenseMarginals_Fast_ColumnBand_FBS(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R, size_t n_start_column, size_t n_end_column, const size_t *p_inv_order, size_t n_order_size, bool b_lower_diag_only = false) // throw(std::bad_alloc) { _ASSERTE(!b_lower_diag_only); // missing code to correctly resume, always calculating full _ASSERTE(p_inv_order); _ASSERTE(n_order_size == r_R.n_BlockColumn_Num()); // make sure the ordering is there and has valid size const size_t n = r_R.n_Column_Num(); // in elements _ASSERTE(n_start_column <= n_end_column); _ASSERTE(n_end_column <= n); // not more than that r_marginals.resize(n, n); // should already have the size if called from Calculate_DenseMarginals_Fast(), then it is a no-op Eigen::VectorXd perm_column(n); enum { b_single_block_size = CTypelistLength<CBlockMatrixTypelist>::n_result == 1, n_first_block_size = fbs_ut::CEigenToDimension<typename CBlockMatrixTypelist::_TyHead>::_TyResult::n_column_num }; // optimize for just a single size in the typelist (compile-time constants) #ifdef _DEBUG const bool _b_single_block_size = b_single_block_size != 0; const int _n_first_block_size = n_first_block_size; #endif // _DEBUG // otherwise not visible to debugger size_t n_block_col = -1, n_col_remains = 1; const size_t nb = r_R.n_BlockColumn_Num(); if(n_start_column) { size_t n_col_base; if(b_single_block_size) { // compile-time constant, should optimize away n_block_col = n_start_column / n_first_block_size; n_col_remains = n_first_block_size; size_t UNUSED_VAR(n_remains_check); _ASSERTE(n_block_col == r_R.n_Find_BlockColumn(n_start_column, n_remains_check)); _ASSERTE(n_remains_check == n_col_remains); n_col_base = n_start_column - n_start_column % n_first_block_size; _ASSERTE(n_col_base == r_R.n_BlockColumn_Base(n_block_col)); // code, optimized for a single block size _ASSERTE(n_block_col != size_t(-1)); _ASSERTE(n_col_base <= n_start_column); // make sure that 
the right column was found _ASSERTE(n_col_remains > n_start_column - n_col_base); // make sure that the start column is inside the block column n_col_remains -= n_start_column - n_col_base; // get how many columns remain } else { /*n_block_col = r_R.n_Find_BlockColumn(n_start_column, n_col_remains); n_col_base = r_R.n_BlockColumn_Base(n_block_col);*/ // this seems flawed, can't ignore the ordering if the blocks are not the same size for(size_t j = 0, n_one_pos = n_start_column; j < n_order_size; ++ j) { size_t n_col = p_inv_order[j]; size_t n_block_base = r_R.n_BlockColumn_Base(n_col); size_t n_block_size = r_R.n_BlockColumn_Column_Num(n_col); if(n_one_pos < n_block_size) { n_col_base = n_block_base; n_col_remains = n_block_size /*- 1*/ - n_one_pos; // like this, without the "- 1" n_block_col = j; // before permutation! break; } else n_one_pos -= n_block_size; } _ASSERTE(n_block_col != size_t(-1)); // need to find the correct column under the permutation } // get a column ++ n_col_remains; // will decrement at the beginning of the loop, compensate for that } for(size_t i = n_start_column, _n = std::min(n, n_end_column); i < _n; ++ i) { if(!(-- n_col_remains)) { // triggers in the first iteration, loads up column width if(b_single_block_size) { // compile-time constant, should optimize away n_col_remains = n_first_block_size; ++ n_block_col; } else n_col_remains = r_R.n_BlockColumn_Column_Num(p_inv_order[++ n_block_col]); // can't ignore the order here! } // if done before, it avoids both referencing block col 0 in an empty matrix // and determining no. 
of columns in one past the last block column at the end _ASSERTE(n_block_col < nb); _ASSERTE(n_block_col + 1 == nb || !b_single_block_size || i + n_col_remains == r_R.n_BlockColumn_Base(n_block_col + 1)); // will only work with a single block size // make sure that the numbers are correct double *p_column = &r_marginals.col(i)(0); #ifdef _DEBUG memset(p_column, 0, n * sizeof(double)); p_column[i] = 1; // make a column vector with a single 1 in it r_R.InversePermute_LeftHandSide_Vector(&perm_column(0), p_column, n, p_inv_order, n_order_size); // this is mostly a waste of time, as the vector is mostly zeros #endif // _DEBUG size_t p = p_inv_order[n_block_col]; // the block column is the one we want size_t n_perm_col = (p + 1 < nb)? r_R.n_BlockColumn_Base(p + 1) - n_col_remains : // quite simple r_R.n_BlockColumn_Base(p) + r_R.n_BlockColumn_Column_Num(p) - n_col_remains; // one more reference _ASSERTE(perm_column(n_perm_col) == 1); // all we need to do is write a single one here // calculate inverse permutation #ifndef _DEBUG perm_column.setZero(); perm_column(n_perm_col) = 1; // form the vector with a single 1 in it already permuted #endif // !_DEBUG r_R.UpperTriangularTranspose_Solve_FBS<CBlockMatrixTypelist>(&perm_column(0), n, p/*n_block_col*/); /*if(b_lower_diag_only) { // t_odo - write a prototype for this as well! r_R.UpperTriangular_Solve_FBS<CBlockMatrixTypelist>(&perm_column(0), n, n_block_col, r_R.n_BlockColumn_Num() - 1); } else*/ r_R.UpperTriangular_Solve_FBS<CBlockMatrixTypelist>(&perm_column(0), n); r_R.Permute_LeftHandSide_Vector(p_column, &perm_column(0), n, p_inv_order, n_order_size); // solve for the whole column thing, generates one column at a time } } /** * @brief fast FBS function that calculates a block of a dense marginals matrix * * This calculates a dense subblock of the marginals matrix. It can be smaller than * the full marginals matrix (which has the same size as R). 
The starting column of * the block is given by n_start_column, and the number of columns is arbitrary * (but not exceeding the size of the marginals). The starting row of the block is always * zero. The number of rows of the block must match the sum of sizes of block rows * selected by p_inv_order and n_smaller_order_size (must be block aligned). * * @tparam CBlockMatrixTypelist is list of Eigen::Matrix block sizes * @tparam Derived0 is Eigen derived matrix type for the first matrix argument * * @param[in,out] r_marginals is filled with the marginals matrix (must come preallocated) * @param[in] r_R is the Cholesky factor * @param[in] n_start_column is zero-based index of the first column (in elements) * @param[in] p_inv_order is inverse ordering on the R factor * @param[in] n_order_size is size of the ordering (must match number of block columns in R) * @param[in] n_smaller_order_size is size of the smaller ordering for the corresponding * block (less or equal to n_order_size) * * @note This function throws std::bad_alloc. * @note This used to be called Calculate_DenseMarginals_Fast_ColumnBand_FBS() which was * unfortunate because the function actually stores only a subblock of the marginals matrix * rather than the dense matrix and the semantics depend on the type of the matrix * (Eigen::MatrixXd for full matrix and anything else for matrix band). Seemed wrong * to call them the same. 
*/
template <class CBlockMatrixTypelist, class Derived0>
static void Calculate_SubblockMarginals_Fast_ColumnBand_FBS(Eigen::MatrixBase<Derived0> &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_start_column, const size_t *p_inv_order,
	size_t n_order_size, size_t n_smaller_order_size) // throw(std::bad_alloc)
{
	// computes the band of columns [n_start_column, n_start_column + r_marginals.cols())
	// of the marginals, one column per loop iteration, by a resumed transpose solve
	// followed by a full upper-triangular solve with r_R

	_ASSERTE(p_inv_order);
	_ASSERTE(n_order_size == r_R.n_BlockColumn_Num());
	// make sure the ordering is there and has valid size

	const size_t n_end_column = n_start_column + r_marginals.cols();
	const size_t n = r_R.n_Column_Num(); // in elements
	_ASSERTE(n_start_column <= n_end_column);
	_ASSERTE(n_end_column <= n); // not more than that
	_ASSERTE(size_t(r_marginals.rows()) <= n); // number of rows must be the same or smaller (the tail is not stored)
	_ASSERTE(r_marginals.cols() == n_end_column - n_start_column); // number of columns is given
	const size_t n_result_rows = r_marginals.rows();
	_ASSERTE(n_smaller_order_size <= n_order_size); // not more
	//_ASSERTE(n_result_rows <= n); // not more // already checked above
	_ASSERTE(n_result_rows < n || n_smaller_order_size == n_order_size); // equal, unless less rows than n are requested

#ifdef _DEBUG
	// verify that dropping the last (n_order_size - n_smaller_order_size) permuted
	// block columns accounts exactly for the rows that are not stored in the result
	size_t n_smaller_order_size_check = n_order_size;
	{
		size_t n_elems = n;
		while(n_elems > n_result_rows) {
			_ASSERTE(n_smaller_order_size_check > 0);
			-- n_smaller_order_size_check; // here
			size_t n_block_size = r_R.n_BlockColumn_Column_Num(p_inv_order[n_smaller_order_size_check]);
			_ASSERTE(n_elems >= n_block_size);
			n_elems -= n_block_size;
			_ASSERTE(n_elems >= n_result_rows); // must be greater or equal
		}
		_ASSERTE(n_elems == n_result_rows);
	}
	_ASSERTE(n_smaller_order_size_check == n_smaller_order_size);
	// just makes sure that n_smaller_order_size is calculated correctly
#endif // _DEBUG

	Eigen::VectorXd perm_column(n); // scratch vector for the permuted unit column / solution

	enum {
		b_single_block_size = CTypelistLength<CBlockMatrixTypelist>::n_result == 1,
		n_first_block_size = fbs_ut::CEigenToDimension<typename
			CBlockMatrixTypelist::_TyHead>::_TyResult::n_column_num
	};
	// optimize for just a single size in the typelist (compile-time constants)

#ifdef _DEBUG
	const bool _b_single_block_size = b_single_block_size != 0;
	const int _n_first_block_size = n_first_block_size;
#endif // _DEBUG // otherwise not visible to debugger

	size_t n_block_col = size_t(-1), n_col_remains = 1;
	const size_t nb = r_R.n_BlockColumn_Num();
	if(n_start_column) {
		// locate the block column containing n_start_column and how many of its
		// elementwise columns remain from n_start_column to its end
		size_t n_col_base;
		if(b_single_block_size) { // compile-time constant, should optimize away
			n_block_col = n_start_column / n_first_block_size;
			n_col_remains = n_first_block_size;
			size_t UNUSED_VAR(n_remains_check);
			_ASSERTE(n_block_col == r_R.n_Find_BlockColumn(n_start_column, n_remains_check));
			_ASSERTE(n_remains_check == n_col_remains);
			n_col_base = n_start_column - n_start_column % n_first_block_size;
			_ASSERTE(n_col_base == r_R.n_BlockColumn_Base(n_block_col));
			// code, optimized for a single block size

			_ASSERTE(n_block_col != size_t(-1));
			_ASSERTE(n_col_base <= n_start_column); // make sure that the right column was found
			_ASSERTE(n_col_remains > n_start_column - n_col_base); // make sure that the start column is inside the block column
			n_col_remains -= n_start_column - n_col_base; // get how many columns remain
		} else {
			/*n_block_col = r_R.n_Find_BlockColumn(n_start_column, n_col_remains);
			n_col_base = r_R.n_BlockColumn_Base(n_block_col);*/
			// this seems flawed, can't ignore the ordering if the blocks are not the same size

			// O(n_order_size) linear scan over the permuted block columns
			for(size_t j = 0, n_one_pos = n_start_column; j < n_order_size; ++ j) {
				size_t n_col = p_inv_order[j];
				size_t n_block_base = r_R.n_BlockColumn_Base(n_col);
				size_t n_block_size = r_R.n_BlockColumn_Column_Num(n_col);
				if(n_one_pos < n_block_size) {
					n_col_base = n_block_base;
					n_col_remains = n_block_size /*- 1*/ - n_one_pos; // like this, without the "- 1"
					n_block_col = j; // before permutation!
					break;
				} else
					n_one_pos -= n_block_size;
			}
			_ASSERTE(n_block_col != size_t(-1));
			// need to find the correct column under the permutation
		}
		// get a column

		++ n_col_remains; // will decrement at the beginning of the loop, compensate for that
	}
	for(size_t i = n_start_column, _n = std::min(n, n_end_column); i < _n; ++ i) {
		if(!(-- n_col_remains)) { // triggers in the first iteration, loads up column width
			if(b_single_block_size) { // compile-time constant, should optimize away
				n_col_remains = n_first_block_size;
				++ n_block_col;
			} else
				n_col_remains = r_R.n_BlockColumn_Column_Num(p_inv_order[++ n_block_col]); // can't ignore the order here!
		}
		// if done before, it avoids both referencing block col 0 in an empty matrix
		// and determining no. of columns in one past the last block column at the end

		_ASSERTE(n_block_col < nb);
		_ASSERTE(n_block_col + 1 == nb || !b_single_block_size ||
			i + n_col_remains == r_R.n_BlockColumn_Base(n_block_col + 1)); // this only works if b_single_block_size
		// make sure that the numbers are correct

		double *p_column = &r_marginals.col(i - n_start_column)(0); // destination column in the output band

#ifdef _DEBUG
		Eigen::VectorXd dbg_column(n);
		dbg_column.setZero();
		dbg_column(i) = 1; // make a column vector with a single 1 in it
		r_R.InversePermute_LeftHandSide_Vector(&perm_column(0), &dbg_column(0), n,
			p_inv_order, n_order_size); // this is mostly a waste of time, as the vector is mostly zeros
		size_t n_correct_perm = std::find(&perm_column(0), &perm_column(0) +
			perm_column.rows(), 1.0) - &perm_column(0); // sometimes it is not obvious why it does not work
#endif // _DEBUG

		// compute where the single 1 of column i lands after the inverse permutation,
		// without actually permuting the whole vector
		size_t p, n_perm_col;
		//if(b_single_block_size) { // compile-time constant, should optimize away
			p = p_inv_order[n_block_col]; // the block column is the one we want
			n_perm_col = (p + 1 < nb)?
				r_R.n_BlockColumn_Base(p + 1) - n_col_remains : // quite simple
				r_R.n_BlockColumn_Base(p) + r_R.n_BlockColumn_Column_Num(p) - n_col_remains; // one more reference
			// this is not the problem, and works even with multiple block sizes; calculation of n_block_col was the problem
		/*} else {
			p = size_t(-1);
			for(size_t j = 0, n_one_pos = i; j < n_order_size; ++ j) {
				size_t n_col = p_inv_order[j];
				size_t n_block_base = r_R.n_BlockColumn_Base(n_col);
				size_t n_block_size = r_R.n_BlockColumn_Column_Num(n_col);
				if(n_one_pos < n_block_size) {
					n_perm_col = n_block_base + n_one_pos;
					p = n_col;
					break;
				} else
					n_one_pos -= n_block_size;
			}
			_ASSERTE(p != size_t(-1));
			// find where the one will be after the permutation (should still be faster
			// than reading & writing the vector in InversePermute_LeftHandSide_Vector())
		}*/

		_ASSERTE(perm_column(n_perm_col) == 1); // all we need to do is write a single one here (checks if the above _DEBUG block calculated it, otherwise it is uninitialized)
		// calculate inverse permutation

		perm_column.setZero();
		perm_column(n_perm_col) = 1; // form the vector with a single 1 in it already permuted (use this in release)

		r_R.UpperTriangularTranspose_Solve_FBS<CBlockMatrixTypelist>(&perm_column(0), n, p/*n_block_col*/); // always resumed
#ifdef _DEBUG
		dbg_column.setZero();
		dbg_column(n_perm_col) = 1;
		r_R.UpperTriangularTranspose_Solve_FBS<CBlockMatrixTypelist>(&dbg_column(0), n);
		// make sure that the resume point is calculated correctly

		_ASSERTE(dbg_column == perm_column); // should be bit-by-bit identical
#endif // _DEBUG
		/*if(b_lower_diag_only) { // t_odo - write a prototype for this as well!
			r_R.UpperTriangular_Solve_FBS<CBlockMatrixTypelist>(&perm_column(0),
				n, n_block_col, r_R.n_BlockColumn_Num() - 1);
		} else*/
			r_R.UpperTriangular_Solve_FBS<CBlockMatrixTypelist>(&perm_column(0),
				n/*, 0, / *n_smaller_order_size,* / r_R.n_BlockColumn_Num() - 1*/); // t_odo - use n_smaller_order_size here
		// can't, would have to see where the blocks are ordered, and only if at the beginning / end, could columns be skipped

		r_R.Permute_LeftHandSide_Vector(p_column, &perm_column(0), n_result_rows,
			p_inv_order, n_smaller_order_size);
		// solve for the whole column thing, generates one column at a time
	}
}

/**
 *	@brief fast FBS function that calculates dense marginals matrix
 *
 *	@tparam CMatrixBlockSizeList is a list of possible matrix block sizes
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] p_inv_order is inverse ordering on the R factor
 *	@param[in] n_order_size is size of the ordering (must match number of block columns in R)
 *
 *	@note This function throws std::bad_alloc.
*/
template <class CBlockMatrixTypelist>
static void Calculate_DenseMarginals_Fast_FBS(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, const size_t *p_inv_order, size_t n_order_size) // throw(std::bad_alloc)
{
	// a thin wrapper: the full matrix is just the column band [0, n)
	Calculate_DenseMarginals_Fast_ColumnBand_FBS<CBlockMatrixTypelist>(r_marginals,
		r_R, 0, r_R.n_Column_Num(), p_inv_order, n_order_size, false);
	// no need for 2nd implementation, just call this

	/*r_marginals.triangularView<Eigen::StrictlyUpper>() =
		r_marginals.triangularView<Eigen::StrictlyLower>().transpose();*/
	// transpose elements below diagonal to elements above it
}

/**
 *	@brief reference function that calculates dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] p_inv_order is pointer to the inverse ordering which was used to calculate r_R
 *	@param[in] n_order_size is size of the ordering, in blocks
 *
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DenseMarginals_Fast(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, const size_t *p_inv_order, size_t n_order_size) // throw(std::bad_alloc)
{
#if 1
	Calculate_DenseMarginals_Fast_ColumnBand(r_marginals, r_R, 0,
		r_R.n_Column_Num(), p_inv_order, n_order_size, false);
	// no need for 2nd implementation, just call this

	/*r_marginals.triangularView<Eigen::StrictlyUpper>() =
		r_marginals.triangularView<Eigen::StrictlyLower>().transpose();*/
	// transpose elements below diagonal to elements above it
#else // 1
	// disabled reference implementation, kept for comparison with the column-band version

	_ASSERTE(p_inv_order);
	_ASSERTE(n_order_size == r_R.n_BlockColumn_Num());
	// make sure the ordering is there and has valid size

	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n);

	Eigen::VectorXd perm_column(n);
	size_t n_block_col = -1, n_col_remains = 1;
	const size_t nb = r_R.n_BlockColumn_Num();
	for(size_t i = 0; i < n; ++ i) {
		if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width
			n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col);
		// if done before, it avoids both referencing block col 0 in an empty matrix
		// and determining no. of columns in one past the last block column at the end

		_ASSERTE(n_block_col < nb);
		//_ASSERTE(n_block_col + 1 == nb || i + n_col_remains == r_R.n_BlockColumn_Base(n_block_col + 1)); // only works with single block size
		// make sure that the numbers are correct

		double *p_column = &r_marginals.col(i)(0);

#ifdef _DEBUG
		memset(p_column, 0, n * sizeof(double));
		p_column[i] = 1; // make a column vector with a single 1 in it
		r_R.InversePermute_LeftHandSide_Vector(&perm_column(0), p_column, n,
			p_inv_order, n_order_size); // this is mostly a waste of time, as the vector is mostly zeros
#endif // _DEBUG

		size_t p = p_inv_order[n_block_col]; // the block column is the one we want
		size_t n_perm_col = (p + 1 < nb)?
			r_R.n_BlockColumn_Base(p + 1) - n_col_remains : // quite simple
			r_R.n_BlockColumn_Base(p) + r_R.n_BlockColumn_Column_Num(p) - n_col_remains; // one more reference
		_ASSERTE(perm_column(n_perm_col) == 1); // all we need to do is write a single one here
		// calculate inverse permutation

#ifndef _DEBUG
		perm_column.setZero();
		perm_column(n_perm_col) = 1; // form the vector with a single 1 in it already permuted
#endif // !_DEBUG
		r_R.UpperTriangularTranspose_Solve(&perm_column(0), n, p/*n_block_col*/);
		r_R.UpperTriangular_Solve(&perm_column(0), n/*, n_block_col, r_R.n_BlockColumn_Num() - 1*/); // only calculates valid items below the diagonal
		r_R.Permute_LeftHandSide_Vector(p_column, &perm_column(0), n,
			p_inv_order, n_order_size);
		// solve for the whole column thing, generates one column at a time
	}
#endif // 1
}

/**
 *	@brief fast parallel function that calculates dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] p_inv_order is inverse ordering on the R factor
 *	@param[in] n_order_size is size of the ordering (must match number of block columns in R)
 *
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DenseMarginals_Fast_Parallel(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, const size_t *p_inv_order, size_t n_order_size) // throw(std::bad_alloc)
{
	_ASSERTE(p_inv_order);
	_ASSERTE(n_order_size == r_R.n_BlockColumn_Num());
	// make sure the ordering is there and has valid size

	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n); // resized before the parallel section so all threads share the final storage

#ifdef _OPENMP
	#pragma omp parallel
	{
		int n_tid = omp_get_thread_num();
		int n_thread_num = omp_get_num_threads();
		size_t n_start = n_tid * (n / n_thread_num);
		size_t n_end = (n_tid + 1 < n_thread_num)? n_start + n / n_thread_num : n;
		// split to bands to be processed in parallel
		// (the last thread takes the remainder; bands are disjoint, so the column
		// writes presumably do not race — verify against the ColumnBand implementation)

		Calculate_DenseMarginals_Fast_ColumnBand(r_marginals, r_R,
			n_start, n_end, p_inv_order, n_order_size, false); // process in parallel
	}
	// calculate the lower-triangular marginals

	/*r_marginals.triangularView<Eigen::StrictlyUpper>() =
		r_marginals.triangularView<Eigen::StrictlyLower>().transpose();*/
	// transpose elements below diagonal to elements above it
#else // _OPENMP
	Calculate_DenseMarginals_Fast(r_marginals, r_R, p_inv_order, n_order_size);
	// otherwise use the serial section
#endif // _OPENMP
}

/**
 *	@brief fast parallel FBS function that calculates dense marginals matrix
 *
 *	@tparam CMatrixBlockSizeList is a list of possible matrix block sizes
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] p_inv_order is inverse ordering on the R factor
 *	@param[in] n_order_size is size of the ordering (must match number of block columns in R)
 *
 *	@note This function throws std::bad_alloc.
*/
template <class CBlockMatrixTypelist>
static void Calculate_DenseMarginals_Fast_Parallel_FBS(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, const size_t *p_inv_order, size_t n_order_size) // throw(std::bad_alloc)
{
	_ASSERTE(p_inv_order);
	_ASSERTE(n_order_size == r_R.n_BlockColumn_Num());
	// make sure the ordering is there and has valid size

	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n); // resized before the parallel section so all threads share the final storage

#ifdef _OPENMP
	#pragma omp parallel
	{
		int n_tid = omp_get_thread_num();
		int n_thread_num = omp_get_num_threads();
		size_t n_start = n_tid * (n / n_thread_num);
		size_t n_end = (n_tid + 1 < n_thread_num)? n_start + n / n_thread_num : n;
		// split to bands to be processed in parallel
		// (the last thread takes the remainder band)

		Calculate_DenseMarginals_Fast_ColumnBand_FBS<CBlockMatrixTypelist>(r_marginals,
			r_R, n_start, n_end, p_inv_order, n_order_size, false); // process in parallel
	}
	// calculate the lower-triangular marginals

	/*r_marginals.triangularView<Eigen::StrictlyUpper>() =
		r_marginals.triangularView<Eigen::StrictlyLower>().transpose();*/
	// transpose elements below diagonal to elements above it
#else // _OPENMP
	Calculate_DenseMarginals_Fast_FBS<CBlockMatrixTypelist>(r_marginals,
		r_R, p_inv_order, n_order_size);
	// otherwise use the serial section
#endif // _OPENMP
}

/**
 *	@brief fast right column band function that calculates dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_column_num is number of columns to calculate (in elements)
 *	@param[in] p_inv_order is inverse ordering on the R factor
 *	@param[in] n_order_size is size of the ordering (must match number of block columns in R)
 *
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DenseMarginals_LastNColumns_Fast(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_column_num, const size_t *p_inv_order,
	size_t n_order_size) // throw(std::bad_alloc)
{
	// delegates to the column-band implementation for the last n_column_num columns
	const size_t n = r_R.n_Column_Num(); // in elements
	return Calculate_DenseMarginals_Fast_ColumnBand(r_marginals, r_R,
		n - n_column_num, n, p_inv_order, n_order_size);

	/*r_marginals.resize(n, n);
	for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) {
		if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width
			n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col);
		// if done before, it avoids both referencing block col 0 in an empty matrix
		// and determining no. of columns in one past the last block column at the end

		if(n > n_column_num - 1 && i < n - n_column_num - 1)
			continue; // skip the prefix columns (todo - just call r_R.n_Find_BlockColumn())

		double *p_column = &r_marginals.col(i)(0);
		memset(p_column, 0, n * sizeof(double));
		p_column[i] = 1; // make a column vector with a single 1 in it
		r_R.UpperTriangularTranspose_Solve(p_column, n, n_block_col);
		r_R.UpperTriangular_Solve(p_column, n/ *, n_block_col* /);
		// solve for the whole column thing, generates one column at a time
	}*/
}

/**
 *	@brief fast FBS right column band function that calculates dense marginals matrix
 *
 *	@tparam CMatrixBlockSizeList is a list of possible matrix block sizes
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_column_num is number of columns to calculate (in elements)
 *	@param[in] p_inv_order is inverse ordering on the R factor
 *	@param[in] n_order_size is size of the ordering (must match number of block columns in R)
 *
 *	@note This function throws std::bad_alloc.
*/
template <class CBlockMatrixTypelist>
static void Calculate_DenseMarginals_LastNColumns_Fast_FBS(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_column_num, const size_t *p_inv_order,
	size_t n_order_size) // throw(std::bad_alloc)
{
	// delegates to the column-band implementation for the last n_column_num columns
	const size_t n = r_R.n_Column_Num(); // in elements
	return Calculate_DenseMarginals_Fast_ColumnBand_FBS<CBlockMatrixTypelist>(r_marginals,
		r_R, n - n_column_num, n, p_inv_order, n_order_size);
}

/**
 *	@brief slow column band function that calculates dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_column_num is number of columns to calculate (in elements)
 *
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DenseMarginals_LastNColumns_Slow(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_column_num) // throw(std::bad_alloc)
{
	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n);
	r_marginals.setZero(); // !!
	Eigen::MatrixXd R_inv_column(n, 1); // R_inv = S
	double *p_column = &R_inv_column.col(0)(0);
	// get dense column data from the Eigen matrix (actually only need one)

	for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) {
		if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width
			n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col);
		// if done before, it avoids both referencing block col 0 in an empty matrix
		// and determining no. of columns in one past the last block column at the end

		// NOTE(review): n_column_num - 1 wraps around for n_column_num == 0 (size_t);
		// presumably never called with zero columns — confirm at the call sites
		if(n > n_column_num - 1 && i < n - n_column_num - 1)
			continue; // skip the prefix columns (todo - just call

		memset(p_column, 0, n * sizeof(double));
		p_column[i] = 1; // make a column vector with a single 1 in it

		size_t UNUSED_VAR(n_block_column_size);
		_ASSERTE(n_block_col == r_R.n_Find_BlockColumn(i, n_block_column_size)); // should be the same
		// get which block column contains column i (optimize this away, probably need to use it when resuming)

		//_ASSERTE(n_block_column_size <= n_diag_band_width); // make this into a run-time check in the production code
		// make sure it is not longer than the diagonal (otherwise we will not have
		// enough backlog to calculate all the off-diagonal elements)

		r_R.UpperTriangular_Solve(p_column, n, n_block_col);
		// backsub, only the nonzero part of the column (started at (block) column
		// which contains column i, with no loss of generality)
		// this seems to be O(i) divisions + O(nnz) MADs in the given (block) column range
		// that sums up to O(n^2/2) divisions + O(nnz log(nnz))?? MADs ... some quadratute anyways

#ifdef _DEBUG
		std::vector<double> backsub_test(n, 0);
		backsub_test[i] = 1; // !!
		r_R.UpperTriangular_Solve(&backsub_test[0], n); // full backsub
		_ASSERTE(!memcmp(p_column, &backsub_test[0], n * sizeof(double))); // make sure that the result is correct
#endif // _DEBUG

		_ASSERTE((Eigen::Map<Eigen::VectorXd, Eigen::Unaligned>(p_column + i + 1,
			n - i - 1).norm() == 0)); // double pars required because of the comma in Map params
		// everything below i is zero (true)

		for(size_t k = n - n_column_num; k <= i; ++ k) {
			for(size_t j = 0; j <= i; ++ j)
				r_marginals(j, k) += p_column[j] * p_column[k]; // it is symmetric, indexing arbitrary
		}
		// accumulate the entries of the covariace matrix. this is O(n^3/2) MADs for the full matrix
		// note that to calculate even only the diagonal, we need full columns
	}
}

/**
 *	@brief FBS kernel for marginal covariance calculation
 */
// todo - carry all the improvements to the non-FBS version (especially
// the sparsification and the inverse diagonal calculation)
class CSparseBlockMarginals_Recurrent_FBSKernel { // todo - fill throws
public:
	/**
	 *	@brief a simple wrapper for block lookup in marginals calculation
	 */
	struct TBlockMatrixLookup {
		CUberBlockMatrix &r_marginals; /**< @brief reference to the output matrix */
#ifdef __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
		std::vector<double*> block_list; /**< @brief block data lookup @note Potentially big; for 10k and x64, it is 762 MB. Troubling, but still, full dense matrix in 10k would be 6.7 GB so it is one order of magnitude better. */
#endif // __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
#ifdef _DEBUG
		const Eigen::MatrixXd *p_ground_truth; /**< @brief ground truth (if available) */
#endif // _DEBUG

		/**
		 *	@brief default constructor; binds to an empty block matrix
		 *	@param[in] _r_marginals is reference to the output matrix
		 */
		TBlockMatrixLookup(CUberBlockMatrix &_r_marginals)
			:r_marginals(_r_marginals)
#ifdef __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
			, block_list(_r_marginals.n_BlockColumn_Num() *
			_r_marginals.n_BlockColumn_Num(), 0) // is symmetric
#endif // __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
#ifdef _DEBUG
			, p_ground_truth(0)
#endif // _DEBUG
		{
			_ASSERTE(r_marginals.n_BlockColumn_Num() == r_marginals.n_BlockRow_Num());
			_ASSERTE(r_marginals.b_SymmetricLayout()); // must be symmetric
			_ASSERTE(!r_marginals.n_Block_Num()); // must be empty
		}

		/**
		 *	@brief gets an existing block from the marginals matrix
		 *
		 *	@param[in] n_block_row is zero-based index of block row
		 *	@param[in] n_block_column is zero-based index of block column
		 *	@param[in] n_block_row_num is number of rows in the block
		 *	@param[in] n_block_col_num is number of columns in the block
		 *
		 *	@return Returns pointer to block data.
		 */
		const double *p_GetExistingBlock(size_t n_block_row, size_t n_block_column,
			int n_block_row_num, int n_block_col_num) const
		{
			// note: row size checked via n_BlockColumn_Column_Num() as well;
			// valid since the ctor asserts a symmetric block layout
			_ASSERTE(n_block_row_num == r_marginals.n_BlockColumn_Column_Num(n_block_row));
			_ASSERTE(n_block_col_num == r_marginals.n_BlockColumn_Column_Num(n_block_column));
#ifndef __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
			return r_marginals.p_GetBlock_Log(n_block_row, n_block_column,
				n_block_row_num/*r_marginals.n_BlockColumn_Column_Num(n_block_row)*/,
				n_block_col_num/*r_marginals.n_BlockColumn_Column_Num(n_block_column)*/);
#else // __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
			_ASSERTE(n_block_row <= n_block_column); // upper triangular
			_ASSERTE(n_block_column <= r_marginals.n_BlockColumn_Num());
			_ASSERTE(n_block_row <= r_marginals.n_BlockColumn_Num());
			size_t n_index = n_block_column * r_marginals.n_BlockRow_Num() + n_block_row;
			_ASSERTE(block_list[n_index]); // should already be there
			return block_list[n_index];
#endif // __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
		}

		/**
		 *	@brief allocates a new block in the marginals matrix
		 *
		 *	@param[in] n_block_row is zero-based index of block row
		 *	@param[in] n_block_column is zero-based index of block column
		 *	@param[in] n_block_row_num is number of rows in the block
		 *	@param[in] n_block_col_num is number of columns in the block
		 *
		 *	@return Returns pointer to block data.
		 *	@note This function throws std::bad_alloc.
		 */
		double *p_GetNewBlock(size_t n_block_row, size_t n_block_column,
			int n_block_row_num, int n_block_col_num) // throw(std::bad_alloc)
		{
			_ASSERTE(n_block_row_num == r_marginals.n_BlockColumn_Column_Num(n_block_row));
			_ASSERTE(n_block_col_num == r_marginals.n_BlockColumn_Column_Num(n_block_column));
#ifndef __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
			return r_marginals.p_GetBlock_Log(n_block_row, n_block_column,
				n_block_row_num/*r_marginals.n_BlockColumn_Column_Num(n_block_row)*/,
				n_block_col_num/*r_marginals.n_BlockColumn_Column_Num(n_block_column)*/, true, false);
			// alloc, don't mind uninit block
#else // __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
			_ASSERTE(n_block_row <= n_block_column); // upper triangular
			_ASSERTE(n_block_column <= r_marginals.n_BlockColumn_Num());
			_ASSERTE(n_block_row <= r_marginals.n_BlockColumn_Num());
			size_t n_index = n_block_column * r_marginals.n_BlockRow_Num() + n_block_row;
			_ASSERTE(!block_list[n_index]); // should really be new, not existing (can safely ignore if this assertion fires, though)
			double *p_result = r_marginals.p_GetBlock_Log(n_block_row, n_block_column,
				n_block_row_num/*r_marginals.n_BlockColumn_Column_Num(n_block_row)*/,
				n_block_col_num/*r_marginals.n_BlockColumn_Column_Num(n_block_column)*/, true, false);
			block_list[n_index] = p_result; // save it
			// don't mind uninit block
			return p_result;
#endif // __MARGINALS_RECURRENT_KERNEL_USE_DENSE_BLOCK_LOOKUP
		}
	};

	/**
	 *	@brief call context of the outer loop
	 */
	struct TOuterContext {
		size_t n_column_i; /**< @brief zero-based index of the current column */
		const CUberBlockMatrix &L; /**< @brief reference to the L factor matrix */
		const CUberBlockMatrix &R; /**< @brief reference to the R factor matrix (only its block structure is relevant) */
		TBlockMatrixLookup &r_marginals; /**< @brief reference to the (output) marginals matrix */
		const Eigen::VectorXd &r_inv_diag_L; /**< @brief reference to the precalculated elementwise inverse of the L factor diagonal */
		const int n_column_part; /**< @brief one of 0 = diagonal only, 1 = copy structure of R, 2 = full column */

		/**
		 *	@brief default constructor; fills the context
		 *
		 *	@param[in] _n_column_i is zero-based index of the current column
		 *	@param[in] _L is reference to the L factor matrix
		 *	@param[in] _R is reference to the R factor matrix (only its block structure is relevant)
		 *	@param[in] _r_marginals is reference to the (output) marginals matrix
		 *	@param[in] _r_inv_diag_L is reference to the precalculated elementwise
		 *		inverse of the L factor diagonal
		 *	@param[in] _n_column_part is one of 0 = diagonal only, 1 = copy structure of R, 2 = full column
		 */
		inline TOuterContext(size_t _n_column_i, const CUberBlockMatrix &_L,
			const CUberBlockMatrix &_R, TBlockMatrixLookup &_r_marginals,
			const Eigen::VectorXd &_r_inv_diag_L, int _n_column_part)
			:n_column_i(_n_column_i), L(_L), R(_R), r_marginals(_r_marginals),
			r_inv_diag_L(_r_inv_diag_L), n_column_part(_n_column_part)
		{}
	};

	/**
	 *	@brief call context of the off-diagonal middle loop
	 */
	struct TMiddleOffDiagContext : public TOuterContext { // t_odo - remove the second "middle"
		size_t n_column_j; /**< @brief zero-based index of the current row (or column in R) */

		/**
		 *	@brief default constructor; fills the context
		 *
		 *	@param[in] t_ctx is the outer loop context (required in all the loops)
		 *	@param[in] _n_column_j is zero-based index of the current row (or column in R)
		 */
		inline TMiddleOffDiagContext(TOuterContext t_ctx, size_t _n_column_j)
			:TOuterContext(t_ctx), n_column_j(_n_column_j)
		{}
	};

	/**
	 *	@brief call context of the blockwise diagonal inner loop
	 */
	struct TInnerDiagContext_Blocky : public TOuterContext {
		size_t j; /**< @brief zero-based index of the current block in the current column */
		size_t n_row_j; /**< @brief zero-based row index of the current block */
		double *p_dest_block; /**< @brief pointer to the block accumulator (output of the loop) */

		/**
		 *	@brief default constructor; fills the context
		 *
		 *	@param[in] t_ctx is the outer loop context (required in all the loops)
		 *	@param[in] _j is zero-based index of the current block in the current column
		 *	@param[in] _n_row_j is zero-based row index of the current block
		 *	@param[in] _p_dest_block is pointer to the block accumulator (output of the loop)
		 */
		inline TInnerDiagContext_Blocky(TOuterContext t_ctx, size_t _j,
			size_t _n_row_j, double *_p_dest_block)
			:TOuterContext(t_ctx), j(_j), n_row_j(_n_row_j), p_dest_block(_p_dest_block)
		{}
	};

	/**
	 *	@brief call context of the elementwise diagonal inner loop
	 */
	struct TInnerDiagContext_Elem : public TOuterContext {
		size_t j; /**< @brief zero-based index of the current block in the current column */
		size_t n_row_j; /**< @brief zero-based row index of the current block */
		size_t n_elem; /**< @brief zero-based index of the destination column in the current block */
		size_t n_elem2; /**< @brief zero-based index of the destination row in the current block */
		double &r_f_diag_sum; /**< @brief reference to the accumulator (output of the loop) */

		/**
		 *	@brief default constructor; fills the context
		 *
		 *	@param[in] t_ctx is the outer loop context (required in all the loops)
		 *	@param[in] _j is zero-based index of the current block in the current column
		 *	@param[in] _n_row_j is zero-based row index of the current block
		 *	@param[in] _n_elem is zero-based index of the destination column in the current block
		 *	@param[in] _n_elem2 is zero-based index of the destination row in the current block
		 *	@param[in] _r_f_diag_sum is reference to the accumulator (output of the loop)
		 */
		inline TInnerDiagContext_Elem(TOuterContext t_ctx, size_t _j, size_t _n_row_j,
			size_t _n_elem, size_t _n_elem2, double &_r_f_diag_sum)
			:TOuterContext(t_ctx), j(_j), n_row_j(_n_row_j), n_elem(_n_elem),
			n_elem2(_n_elem2), r_f_diag_sum(_r_f_diag_sum)
		{}
	};

	/**
	 *	@brief call context of the off-diagonal inner loop
	 */
	struct TInnerOffDiagContext : public TMiddleOffDiagContext {
		size_t k; /**< @brief zero-based index of the current block in the current column */
		size_t n_row_k; /**< @brief zero-based row index of the current block */
		double *p_dest_block; /**< @brief pointer to the block accumulator (output of the loop) */

		/**
		 *	@brief default constructor; fills the context
		 *
		 *	@param[in] t_ctx is the middle loop context (required inside of this loops)
		 *	@param[in] _k is zero-based index of the current block in the current column
		 *	@param[in] _n_row_k is zero-based row index of the current block
		 *	@param[in] _p_dest_block is pointer to the block accumulator (output of the loop)
		 */
		// NOTE(review): the mem-initializer list is written out of declaration order;
		// members are still initialized in declaration order (k before n_row_k),
		// which is harmless here as the initializers are independent
		inline TInnerOffDiagContext(TMiddleOffDiagContext t_ctx, /*size_t _n_column_j,*/
			size_t _k, size_t _n_row_k, double *_p_dest_block)
			:TMiddleOffDiagContext(t_ctx), n_row_k(_n_row_k), /*n_column_j(_n_column_j),*/
			k(_k), p_dest_block(_p_dest_block)
		{}
	};

	/**
	 *	@brief diagonal inner loop body; calculates a dot product contribution
	 *		of a single depending block (to diagonal block of the marginals matrix)
	 *	@tparam n_row_j_size is size of the row (or column) j
	 */
	template <const int n_row_j_size, class CColumnISize>
	struct TDiagInnerLoop_Blocky {
		/**
		 *	@brief diagonal inner loop body implementation
		 *	@param[in] t_ctx is loop context
		 */
		static inline void Do(TInnerDiagContext_Blocky t_ctx)
		{
			enum {
				n_column_i_size = CColumnISize::n_size
			};
			const CUberBlockMatrix &L = t_ctx.L;
			TBlockMatrixLookup &r_marginals = t_ctx.r_marginals;
			const size_t n_column_i = t_ctx.n_column_i;
			const size_t j = t_ctx.j;
			const size_t n_row_j = t_ctx.n_row_j;
			typename CUberBlockMatrix::CMakeMatrixRef<n_column_i_size, n_column_i_size>::_Ty
				block_i_i_mar(t_ctx.p_dest_block);
			// unwrap the contexts

			typename CUberBlockMatrix::CMakeMatrixRef<n_row_j_size, n_column_i_size>::_TyConst
				block_j_i = L.t_Block_AtColumn<n_row_j_size, n_column_i_size>(n_column_i, j); // todo - rename to block_k_j, check for similar mistakes
			_ASSERTE(n_row_j > n_column_i); // make sure it is in lower tri (always should)
			typename CUberBlockMatrix::CMakeMatrixRef<n_column_i_size, n_row_j_size>::_TyConst
				block_i_j_mar(r_marginals.p_GetExistingBlock(n_column_i, n_row_j,
				n_column_i_size, n_row_j_size));
			// get the corresponding block in the marginals (already calculated at this moment)

			//block_i_i_mar.noalias() += (block_i_j_mar * block_j_i).transpose(); // 10.36 on 10k
			//block_i_i_mar.noalias() += block_j_i.transpose() * block_i_j_mar.transpose(); // 10.38 on 10k
			//block_i_i_mar += block_i_j_mar.lazyProduct(block_j_i).transpose(); // 10.43 on 10k
			block_i_i_mar += block_j_i.transpose().lazyProduct(block_i_j_mar.transpose()); // 10.35 on 10k
			// t_odo - try with lazyProduct(), try reverse order product and transpose the factors themselves
			// add dot of one column of the block with span of the current column of the marginals
		}
	};

	/**
	 *	@brief diagonal inner loop body; calculates a single dot product contribution
	 *		of a single column of a single off-diagonal block to a single diagonal block element
	 *
	 *	@tparam n_row_j_size is number of rows of the contributing block
	 *	@tparam CColumnISize is number of rows (columns) of the diagonal block
	 */
	template <const int n_row_j_size, class CColumnISize>
	struct TDiagInnerLoop_Elem {
		/**
		 *	@brief diagonal inner loop body implementation
		 *	@param[in] t_ctx is loop context
		 */
		static inline void Do(TInnerDiagContext_Elem t_ctx)
		{
			enum {
				n_column_i_size = CColumnISize::n_size // make this variable-like
			};
			const size_t n_column_i = t_ctx.n_column_i;
			const CUberBlockMatrix &L = t_ctx.L;
			TBlockMatrixLookup &r_marginals = t_ctx.r_marginals;
			const size_t j = t_ctx.j;
			const size_t n_row_j = t_ctx.n_row_j;
			const size_t n_elem = t_ctx.n_elem;
			const size_t n_elem2 = t_ctx.n_elem2;
			// unwrap the contexts

			typename CUberBlockMatrix::CMakeMatrixRef<n_row_j_size, n_column_i_size>::_TyConst
				block_i_j = L.t_Block_AtColumn<n_row_j_size, n_column_i_size>(n_column_i, j);
			_ASSERTE(n_row_j_size == block_i_j.rows());
			// look up the row in L

			typename CUberBlockMatrix::CMakeMatrixRef<n_column_i_size, n_row_j_size>::_TyConst
				block_i_j_mar(r_marginals.p_GetExistingBlock(n_column_i, n_row_j,
				n_column_i_size, n_row_j_size));
			// get the corresponding block in the marginals (already calculated at this moment)
			// note that (n_row_j, n_column_i) would be lower diagonal, here we access
			// the transpose block accross the diagonal

			_ASSERTE(n_row_j > n_column_i); // make sure the below dot product will sample uper diagonal of the marginals
			t_ctx.r_f_diag_sum += block_i_j_mar.row(n_elem).dot(block_i_j.col(n_elem2)); // t_odo - _FBS it
			// add dot of one column of the block with span of the current column of the marginals
		}
	};

	/**
	 *	@brief off-diagonal inner loop body; calculates a dot product contribution
	 *		of a single depending block (to off-diagonal block of the marginals matrix)
	 *	@tparam n_row_k_size is size of the row (or column) k
	 */
	template <const int n_row_k_size, class CContext2>
	struct TOffDiagInnerLoop {
		/**
		 *	@brief off-diagonal inner loop body implementation
		 *	@param[in] t_ctx is loop context
		 */
		static inline void Do(TInnerOffDiagContext t_ctx)
		{
			enum {
				n_column_j_size = CContext2::_TyHead::n_row_num,
				n_column_i_size = CContext2::_TyHead::n_column_num,
				b_upper_tri = CContext2::_TyTail::_TyHead::b_flag
			};
			const CUberBlockMatrix &L = t_ctx.L;
			TBlockMatrixLookup &r_marginals = t_ctx.r_marginals;
			const size_t n_column_i = t_ctx.n_column_i;
			const size_t n_column_j = t_ctx.n_column_j;
			const size_t k = t_ctx.k;
			const size_t n_row_k = t_ctx.n_row_k;//L.n_Block_Row(n_column_j, k); // todo - pass this through the context
			typename CUberBlockMatrix::CMakeMatrixRef<n_column_j_size, n_column_i_size>::_Ty
				block_j_i_mar(t_ctx.p_dest_block);
			// unwrap the contexts

			if(b_upper_tri) { // compile-time constant; should optimize away
				typename CUberBlockMatrix::CMakeMatrixRef<n_row_k_size, n_column_j_size>::_TyConst
					block_j_k = L.t_Block_AtColumn<n_row_k_size, n_column_j_size>(n_column_j, k);
				_ASSERTE(n_row_k <= n_column_i); // make sure it is in upper tri (don't mind the diagonal)
				typename CUberBlockMatrix::CMakeMatrixRef<n_row_k_size, n_column_i_size>::_TyConst
					block_i_k_mar(r_marginals.p_GetExistingBlock(n_row_k, n_column_i,
					n_row_k_size, n_column_i_size));
				// get the corresponding block in the marginals (already calculated at this moment)

				//block_j_i_mar.noalias() += block_j_k.transpose() * block_i_k_mar; // 10.35 on 10k
				block_j_i_mar += block_j_k.transpose().lazyProduct(block_i_k_mar); // 10.29 on 10k
				// t_odo - try with lazyProduct()
				// add dot of one column of the block with span of the current column of the marginals
			} else {
				typename CUberBlockMatrix::CMakeMatrixRef<n_row_k_size, n_column_j_size>::_TyConst
					block_j_k = L.t_Block_AtColumn<n_row_k_size, n_column_j_size>(n_column_j, k); // todo - rename to block_k_j, check for similar mistakes
				_ASSERTE(n_row_k >= n_column_i); // make sure it is in lower tri (don't mind the diagonal)
				typename CUberBlockMatrix::CMakeMatrixRef<n_column_i_size, n_row_k_size>::_TyConst
					block_i_k_mar(r_marginals.p_GetExistingBlock(n_column_i, n_row_k,
					n_column_i_size, n_row_k_size));
				// get the corresponding block in the marginals (already calculated at this moment)

				//block_j_i_mar.noalias() += (block_i_k_mar * block_j_k).transpose(); // 10.36 on 10k
				//block_j_i_mar.noalias() += block_j_k.transpose() * block_i_k_mar.transpose(); // 10.38 on 10k
				//block_j_i_mar += block_i_k_mar.lazyProduct(block_j_k).transpose(); // 10.43 on 10k
				block_j_i_mar += block_j_k.transpose().lazyProduct(block_i_k_mar.transpose()); // 10.35 on 10k
				// t_odo - try with lazyProduct(), try reverse order product and transpose the factors themselves
				// add dot of one column of the block with span of the current column of the marginals
			}
		}
	};

	/**
	 *	@brief off-diagonal middle loop body; calculates a single
	 *		off-diagonal block of the marginals matrix
	 *
	 *	@tparam n_column_j_size is size of the column (or row) j
	 *	@tparam CContext2 is a typelist, containing list of block sizes and size of column i
	 */
	template <const int n_column_j_size, class CContext2>
	struct TOffDiagMiddleLoop {
		typedef typename CContext2::_TyHead CMatrixBlockSizeList; /**< @brief block sizes */
		typedef typename CContext2::_TyTail::_TyHead CColumnISize; /**< @brief size of column i */

		/**
		 * @brief off-diagonal middle loop implementation
		 *
		 * Calculates marginals block (n_column_j, n_column_i) in two phases: first a
		 * blockwise product sum over the off-diagonal blocks of column j of L (via
		 * TOffDiagInnerLoop, dispatched through the FBS decision tree), then a
		 * backward elementwise sweep that completes the back-substitution using the
		 * diagonal block of L. Only the upper-triangular block is written.
		 *
		 * @param[in] t_ctx is loop context
		 * @note This function throws std::bad_alloc.
		 */
		static inline void Do(TMiddleOffDiagContext t_ctx) // throw(std::bad_alloc)
		{
			enum {
				n_column_i_size = CColumnISize::n_size // make this variable-like
			};
			const size_t n_column_i = t_ctx.n_column_i;
			const CUberBlockMatrix &L = t_ctx.L;
			TBlockMatrixLookup &r_marginals = t_ctx.r_marginals;
			const size_t n_column_j = t_ctx.n_column_j;
			const Eigen::VectorXd &r_inv_diag_L = t_ctx.r_inv_diag_L;
			// unwrap the contexts

			_ASSERTE(n_column_j_size == L.n_BlockColumn_Column_Num(n_column_j));
			size_t n_column_j_base = L.n_BlockColumn_Base(n_column_j);
			size_t n_column_j_block_num = L.n_BlockColumn_Block_Num(n_column_j); // t_odo - rename *block_j* to *column_j*
			// gets the corresponding block column

			typename CUberBlockMatrix::CMakeMatrixRef<n_column_j_size, n_column_j_size>::_TyConst
				cur_L_diag_block = L.t_Block_AtColumn<n_column_j_size, n_column_j_size>(n_column_j, 0); // t_odo - rename those to have "L" in them
			_ASSERTE(L.n_Block_Row(n_column_j, 0) == n_column_j); // make sure it is at the diagonal (L has symmetric layout)

			_ASSERTE(n_column_j < n_column_i); // strictly upper even
			typename CUberBlockMatrix::CMakeMatrixRef<n_column_j_size, n_column_i_size>::_Ty
				block_j_i_mar(r_marginals.p_GetNewBlock(n_column_j, n_column_i, n_column_j_size, n_column_i_size)); // t_odo rename this
			// work with blocks also in the matrix of the marginals

			const typename Eigen::VectorBlock<const Eigen::VectorXd, n_column_j_size> cur_L_inv_diag_span =
				r_inv_diag_L.segment<n_column_j_size>(n_column_j_base);
			// precalculated elementwise inverse of the diagonal of L, for this block column

			//Eigen::Matrix<double, n_column_j_size, n_column_i_size> block_j_i_mar; // use local matrix
			block_j_i_mar.setZero();
			for(size_t k = 1; k < n_column_j_block_num; ++ k) { // all the blocks except the diagonal
				size_t n_row_k = L.n_Block_Row(n_column_j, k);
				size_t n_row_k_size = L.n_BlockRow_Row_Num(n_row_k);
				//size_t n_row_k_base = L.n_BlockRow_Base(n_row_k);

				if(n_row_k/*_base*/ < n_column_i/*_base*/) {
					// row k is still above column i; the marginals block is read directly (upper-tri flag set)
					typedef typename MakeTypelist_Safe((fbs_ut::CCTSize2D<n_column_j_size, n_column_i_size>,
						fbs_ut::CCTFlag<true>)) _TySecondaryContext; // secondary context for the loop
					fbs_ut::CWrap2<TOffDiagInnerLoop, _TySecondaryContext>::template
						In_RowHeight_DecisionTree_Given_ColumnWidth<CMatrixBlockSizeList,
						n_column_j_size>(int(n_row_k_size), TInnerOffDiagContext(t_ctx,
						/*n_column_j,*/ k, n_row_k, block_j_i_mar.data()));
				} else {
					// row k reached / passed column i; the marginals block is accessed transposed (flag cleared)
					typedef typename MakeTypelist_Safe((fbs_ut::CCTSize2D<n_column_j_size, n_column_i_size>,
						fbs_ut::CCTFlag<false>)) _TySecondaryContext; // secondary context for the loop
					for(;;) {
						fbs_ut::CWrap2<TOffDiagInnerLoop, _TySecondaryContext>::template
							In_RowHeight_DecisionTree_Given_ColumnWidth<CMatrixBlockSizeList,
							n_column_j_size>(int(n_row_k_size), TInnerOffDiagContext(t_ctx,
							/*n_column_j,*/ k, n_row_k, block_j_i_mar.data()));
						if(++ k == n_column_j_block_num)
							break;
						n_row_k = L.n_Block_Row(n_column_j, k);
						n_row_k_size = L.n_BlockRow_Row_Num(n_row_k);
						//n_row_k_base = L.n_BlockRow_Base(n_row_k);
						_ASSERTE(n_row_k/*_base*/ >= n_column_i/*_base*/); // once lower, always lower
					}
					// t_odo - this branch will be used for all subsequent blocks; make another
					// loop here without the test, then break out from the outer loop
					// does not save much, but doesn't complicate the code either

					break;
				}
				// call the inner loop
			}
			// calculate the first part of the sum in completely blockwise manner

			for(size_t n_elem = n_column_i_size; n_elem > 0;) { // go backwards
				-- n_elem; // here

#if 0 // different code for the last row of the result block
				{
					size_t n_elem2 = n_column_j_size - 1;
					// the last diagonal

					double f_L_jj_inv = cur_L_inv_diag_span(n_elem2);//1 / cur_L_diag_block(n_elem2, n_elem2);
					// get the diagonal block and element

					double f_diag_sum = block_j_i_mar(n_elem2, n_elem); // (calculated blockwise) // continue from here
					// no additional dot product, there is nothing below (would be size 0 dot product, complicated _FBS)

					block_j_i_mar(n_elem2, n_elem) = -f_diag_sum * f_L_jj_inv;
					// write only upper triangular, will mirror it at once (or not at all)
				}
				// the loop skips one elem

				for(size_t n_elem2 = n_column_j_size - 1; n_elem2 > 0;) { // go backwards
#else // 1
				for(size_t n_elem2 = n_column_j_size; n_elem2 > 0;) { // go backwards
#endif // 1
					-- n_elem2; // here

					double f_L_jj_inv = cur_L_inv_diag_span(n_elem2);//1 / cur_L_diag_block(n_elem2, n_elem2);
					//double f_L_jj_inv_gt = 1 / cur_L_diag_block(n_elem2, n_elem2);
					// get the diagonal block and element

					double f_diag_sum = block_j_i_mar(n_elem2, n_elem); // (calculated blockwise) // continue from here

					{
						size_t n_first_underdiag_elem = n_elem2 + 1;
						size_t n_underdiag_elem_num = n_column_j_size - n_first_underdiag_elem;
						f_diag_sum += block_j_i_mar.col(n_elem).segment(n_first_underdiag_elem,
							n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail(n_underdiag_elem_num)); // todo - _FBS it
						// complete the sum
					}
					// this is complicated in block approach (maybe not, maybe it is just the task for a triangular view)

					block_j_i_mar(n_elem2, n_elem) = -f_diag_sum * f_L_jj_inv;
					// write only upper triangular, will mirror it at once (or not at all)
				}
			}
			// finish the calculation in elementwise manner (can't be vectorized, the order is important)

#ifdef _DEBUG
			if(_isnan(block_j_i_mar.norm())) {
				fprintf(stderr, "error: marginal block (" PRIsize ", " PRIsize ") contains NaNs\n",
					n_column_j, n_column_i);
			}
			if(r_marginals.p_ground_truth) {
				const Eigen::MatrixXd &r_gt = *r_marginals.p_ground_truth;
				size_t n_column_i_base = L.n_BlockColumn_Base(n_column_i);
				Eigen::Block<const Eigen::MatrixXd, n_column_j_size, n_column_i_size> block_j_i_gt =
					r_gt.block<n_column_j_size, n_column_i_size>(n_column_j_base, n_column_i_base);
				double f_error;
				Eigen::MatrixXd diff = (block_j_i_gt - block_j_i_mar); // required by g++, norm of Eigen::Block seems problematic
				if((f_error = /*(block_j_i_gt - block_j_i_mar)*/diff.norm()) > 1e-5) {
					fprintf(stderr, "error: marginal block (" PRIsize ", " PRIsize ") is off by %g\n",
						n_column_j, n_column_i, f_error);
					CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, block_j_i_gt, "block_j_i_gt = ");
					CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, block_j_i_mar, "block_j_i_mar = ");
				}
			}
#endif // _DEBUG
			// check the block
		}
	};

	/**
	 * @brief outer loop body; calculates one block column of the marginals
	 *
	 * @tparam n_column_i_size is size of the current column, in elements
	 * @tparam CMatrixBlockSizeList is a list of possible matrix block sizes
	 */
	template <const int n_column_i_size, class CMatrixBlockSizeList>
	struct TOuterLoop {
		/**
		 * @brief outer loop implementation
		 *
		 * Calculates the diagonal marginals block of column i (unless running the
		 * second pass, n_column_part == 2), then dispatches TOffDiagMiddleLoop for
		 * the off-diagonal blocks, selected by n_column_part (1 = blocks coincident
		 * with the structure of R, 2 = the whole column above the diagonal).
		 *
		 * @param[in] t_ctx is loop context
		 * @note This function throws std::bad_alloc.
		 */
		static inline void Do(TOuterContext t_ctx) // throw(std::bad_alloc)
		{
			const size_t n_column_i = t_ctx.n_column_i;
			const CUberBlockMatrix &R = t_ctx.R;
			const CUberBlockMatrix &L = t_ctx.L;
			TBlockMatrixLookup/*Eigen::MatrixXd*/ &r_marginals = t_ctx.r_marginals;
			const Eigen::VectorXd &r_inv_diag_L = t_ctx.r_inv_diag_L;
			const int n_column_part = t_ctx.n_column_part;
			// unwrap the context

			_ASSERTE(size_t(n_column_i_size) == L.n_BlockColumn_Column_Num(n_column_i));
			size_t n_column_i_base = L.n_BlockColumn_Base(n_column_i);
			size_t n_column_i_block_num = L.n_BlockColumn_Block_Num(n_column_i); // t_odo - rename *block_i* to *column_i*
			// gets the corresponding block col

			const typename Eigen::VectorBlock<const Eigen::VectorXd, n_column_i_size> cur_L_inv_diag_span =
				r_inv_diag_L.segment<n_column_i_size>(n_column_i_base);
			// get diagonal span

			if(n_column_part != 2) { // if 2, then we are doing the full column, which also means that we are running the 2nd pass, and the diagonal block is already computed, no need to redo // not so sure about it at the moment // todo - put it back
				typename CUberBlockMatrix::CMakeMatrixRef<n_column_i_size, n_column_i_size>::_Ty
					block_i_i_mar(r_marginals.p_GetNewBlock(n_column_i, n_column_i, n_column_i_size, n_column_i_size));
				//Eigen::Matrix<double, n_column_i_size, n_column_i_size> block_i_i_mar; // uninit is ok
				//TDiagBlock block_i_i_mar =
				//	r_marginals.block<n_column_i_size, n_column_i_size>(n_column_i_base, n_column_i_base);
				// work with blocks also in the matrix of the marginals

				typename CUberBlockMatrix::CMakeMatrixRef<n_column_i_size, n_column_i_size>::_TyConst
					cur_L_diag_block = L.t_Block_AtColumn<n_column_i_size, n_column_i_size>(n_column_i, 0);
				_ASSERTE(L.n_Block_Row(n_column_i, 0) == n_column_i); // make sure it is at the diagonal (L has symmetric layout)
				// get diag block

				//cur_L_inv_diag_span = /*1 / */cur_L_diag_block.diagonal().array().inverse(); // invert the diagonal (with SSE?)
				// this is wrong, the rest of the blocks will need higher diagonals, will ref uninit data
				// need to do this beforehand (the first column can in worst case reference all the diagonals)

#ifdef __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP
				block_i_i_mar.setZero();
				for(size_t j = 1; j < n_column_i_block_num; ++ j) { // all the blocks except the diagonal
					size_t n_row_j = L.n_Block_Row(n_column_i, j);
					size_t n_row_j_size = L.n_BlockColumn_Column_Num(n_row_j);

					/*_ASSERTE(n_row_j >= n_column_i); // all in lower tri
					fbs_ut::CWrap<TOffDiagInnerLoop>::template In_RowHeight_DecisionTree_Given_ColumnWidth<CMatrixBlockSizeList,
						n_column_i_size>(int(n_row_j_size), TInnerOffDiagContext<n_column_i_size,
						n_column_i_size, false>(t_ctx, n_column_i, j, n_row_j, block_i_i_mar));
					// t_odo - this is slightly more complicated than it needs to be, n_column_i is there twice now*/

					fbs_ut::CWrap2<TDiagInnerLoop_Blocky, fbs_ut::CCTSize<n_column_i_size> >::template
						In_RowHeight_DecisionTree_Given_ColumnWidth<CMatrixBlockSizeList,
						n_column_i_size>(int(n_row_j_size), TInnerDiagContext_Blocky(t_ctx,
						j, n_row_j, block_i_i_mar.data())); // more specialized version

					// call the inner loop
				}
				// calculate the first part of the sum in completely blockwise manner
				// note that only triangular part is required so some extra calculations are performed
				// but at the same time the blocks are only accessed once, possibly saving memory bandwidth
#endif // __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP

				for(size_t n_elem = n_column_i_size; n_elem > 0;) { // go backwards
					-- n_elem; // here

					for(size_t n_elem2 = n_elem + 1; n_elem2 > 0;) { // for all the elements above or on diagonal in this marginal block
						-- n_elem2; // here

						double f_L_jj_inv = cur_L_inv_diag_span(n_elem2);//1 / cur_L_diag_block(n_elem2, n_elem2);
						//double f_L_jj_inv_gt = 1 / cur_L_diag_block(n_elem2, n_elem2);
#ifdef __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP
						double f_diag_sum = block_i_i_mar(n_elem2, n_elem); // calculated blockwise
#else // __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP
						double f_diag_sum = 0;
						for(size_t j = 1; j < n_column_i_block_num; ++ j) { // all the blocks except the diagonal
							size_t n_row_j = L.n_Block_Row(n_column_i, j);
							size_t n_row_j_size = L.n_BlockColumn_Column_Num(n_row_j);

							fbs_ut::CWrap2<TDiagInnerLoop_Elem, fbs_ut::CCTSize<n_column_i_size> >::template
								In_RowHeight_DecisionTree_Given_ColumnWidth<CMatrixBlockSizeList,
								n_column_i_size>(int(n_row_j_size), TInnerDiagContext_Elem(t_ctx,
								j, n_row_j, n_elem, n_elem2, f_diag_sum));
							// call the inner loop
						}
						//double f_from_mat = block_i_i_mar(n_elem, n_elem2), f_fm2 = block_i_i_mar(n_elem2, n_elem);
						//_ASSERTE(fabs(f_diag_sum - block_i_i_mar(n_elem2, n_elem)) < 1e-5f);
						// todo - this can be done using blockwise code (instead of row-wise), same as off-diagonal elements
#endif // __MARGINALS_RECURRENT_KERNEL_USE_BLOCKY_DIAGONAL_LOOP

						{
							size_t n_first_underdiag_elem = n_elem2 + 1;
							size_t n_underdiag_elem_num = n_column_i_size - n_first_underdiag_elem;
							//if(n_column_i_size > n_elem2 + 1) // already in the .dot() loop, wouldn't bring any benefit
							f_diag_sum += block_i_i_mar.col(n_elem).segment(n_first_underdiag_elem,
								n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail(n_underdiag_elem_num)); // todo - _FBS it
							// complete the sum
						}

						if(n_elem2 == n_elem) {
							block_i_i_mar(n_elem, n_elem) = f_L_jj_inv * (f_L_jj_inv - f_diag_sum); // diagonal elems have a different formula
						} else {
							block_i_i_mar(n_elem2, n_elem) = block_i_i_mar(n_elem, n_elem2) =
								f_L_jj_inv * -f_diag_sum; // use _jj! (different diagonal elem in the same block)
						}
					}
				}

#ifdef _DEBUG
				if(_isnan(block_i_i_mar.norm())) {
					fprintf(stderr, "error: marginal block (" PRIsize ", " PRIsize ") contains NaNs\n",
						n_column_i, n_column_i);
				}
				if(r_marginals.p_ground_truth) {
					const Eigen::MatrixXd &r_gt = *r_marginals.p_ground_truth;
					Eigen::Matrix<double, n_column_i_size, n_column_i_size> block_i_i_gt =
						r_gt.block<n_column_i_size, n_column_i_size>(n_column_i_base, n_column_i_base);
					double f_error;
					if((f_error = (block_i_i_gt - block_i_i_mar).norm()) > 1e-5) {
						fprintf(stderr, "error: marginal block (" PRIsize ", " PRIsize ") is off by %g\n",
							n_column_i, n_column_i, f_error);
						CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, block_i_i_gt, "block_i_i_gt = ");
						CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, block_i_i_mar, "block_i_i_mar = ");
					}
				}
#endif // _DEBUG
			}
			// do the block at the diagonal

			if(n_column_part == 1) { // copy sparsity of R
				size_t n_R_column_i_block_num = R.n_BlockColumn_Block_Num(n_column_i); // or use L and calculate the marginals in lower triangular part (probably a lot of changes)
				for(size_t j = n_R_column_i_block_num - 1; j > 0;) { -- j; // except the last block (the diagonal one)
					size_t n_column_j = R.n_Block_Row(n_column_i, j); // this needs R as well
				// try it like that (calculate marginals of only the spine blocks, that is much faster)

				//for(size_t j = n_column_i_block_num - 1; j > 0; -- j) { // except the block 0 (the diagonal one)
				//	size_t n_column_j = L.n_BlockColumn_Num() - 1 - L.n_Block_Row(n_column_i, j); // upside-down // this is incorrect, really need R

				//for(size_t n_column_j = n_column_i; n_column_j > 0;) { // note that this is only required for the blocks that are required on output or blocks that are coincident with nnz blocks in the L or R factor
				//	-- n_column_j; // here // this calculates full matrix

					size_t n_column_j_size = L.n_BlockColumn_Column_Num(n_column_j);

					typedef typename MakeTypelist(CMatrixBlockSizeList, fbs_ut::CCTSize<n_column_i_size>) _TySecondaryContext;
					fbs_ut::CWrap2<TOffDiagMiddleLoop, _TySecondaryContext>::template
						In_ColumnWidth_DecisionTree<CMatrixBlockSizeList>(int(n_column_j_size),
						TMiddleOffDiagContext(t_ctx, n_column_j));
					// call the middle loop
				}
			} else if(n_column_part == 2) { // full column (the part above the diagonal, anyway)
				_ASSERTE(n_column_i); // zero has no part above, i want the caller to handle this
				for(size_t n_column_j = n_column_i; n_column_j > 0;) {
					-- n_column_j; // except the last block (the diagonal one)

					size_t n_column_j_size = L.n_BlockColumn_Column_Num(n_column_j);

					typedef typename MakeTypelist(CMatrixBlockSizeList, fbs_ut::CCTSize<n_column_i_size>) _TySecondaryContext;
					fbs_ut::CWrap2<TOffDiagMiddleLoop, _TySecondaryContext>::template
						In_ColumnWidth_DecisionTree<CMatrixBlockSizeList>(int(n_column_j_size),
						TMiddleOffDiagContext(t_ctx, n_column_j));
					// call the middle loop
				}
				// dense (but under ordering, might run into trouble if ordered improperly)
			}
			// note - to calculate full matrix, everything is in place now, calculation of the remaining blocks
			// are independent of each other, and can be done in parallel!
		}
	};

public:
	/**
	 * @brief loop function (just calls the decision tree)
	 *
	 * @tparam CMatrixBlockSizeList is a list of possible matrix block sizes
	 *
	 * @param[in] n_column_i_size is size of the current column
	 * @param[in] n_column_i is zero-based index of the current column
	 * @param[in] L is reference to the L factor matrix
	 * @param[in] R is reference to the R factor matrix (only its block structure is relevant)
	 * @param[in] r_marginals is reference to the (output) marginals matrix
	 * @param[in] r_inv_diag_L is reference to the precalculated elementwise inverse of the L factor diagonal
	 * @param[in] n_column_part is one of 0 = diagonal only, 1 = copy structure of R, 2 = full column
	 *
	 * @note This function throws std::bad_alloc.
	 */
	template <class CBlockMatrixTypelist>
	static inline void Run(size_t n_column_i_size, size_t n_column_i,
		const CUberBlockMatrix &L, const CUberBlockMatrix &R, TBlockMatrixLookup &r_marginals,
		const Eigen::VectorXd &r_inv_diag_L, int n_column_part) // throw(std::bad_alloc)
	{
		fbs_ut::CWrap2<TOuterLoop, CBlockMatrixTypelist>::template In_ColumnWidth_DecisionTree<CBlockMatrixTypelist>(
			int(n_column_i_size), TOuterContext(n_column_i, L, R, r_marginals, r_inv_diag_L, n_column_part));
		// wrap the outer loop
	}
};

/**
 * @brief reference function that calculates blockwise sparse marginals matrix
 *
 * @tparam CMatrixBlockSizeList is a list of possible matrix block sizes
 *
 * @param[out] r_marginals is filled with the marginals matrix
 * @param[in] r_R is the Cholesky factor
 * @param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R
 * @param[in] n_matrix_part is matrix part to be calculated (note that mpart_FullMatrix
 * really means the upper triangle, rather than a full dense matrix; default
 * mpart_Nothing)
 * @param[in] b_structure_of_R is additional matrix part to be calculated (if set, the
 * structure of R is calculated; if not set, only the part specified by n_matrix_part
 * is calculated)
 *
 * @note This function throws
 *	std::bad_alloc.
 */
template <class CBlockMatrixTypelist>
static void Calculate_DenseMarginals_Recurrent_FBS(CUberBlockMatrix &r_marginals,
	const CUberBlockMatrix &r_R, const CMatrixOrdering &mord,
	EBlockMatrixPart n_matrix_part = mpart_Nothing,
	bool b_structure_of_R = true) // throw(std::bad_alloc)
{
	_ASSERTE(!(n_matrix_part & mpart_Column)); // which column?
	//if(n_matrix_part == mpart_FullMatrix)
	//	throw std::runtime_error("mpart_FullMatrix in Calculate_DenseMarginals_Recurrent_FBS() not implemented");
	// t_odo - implement this not very useful feature (it just takes a *lot* of memory)

	if((n_matrix_part & mpart_Diagonal) == mpart_Diagonal)
		b_structure_of_R = true; // trade the diagonal for the structure of R, which includes diagonal
	if(b_structure_of_R)
		n_matrix_part = n_MPart_Subtract(n_matrix_part, EBlockMatrixPart(mpart_Diagonal | mpart_LastBlock));
	// the last block is implied in the structure of R, as is the diagonal

	if(!b_structure_of_R && n_matrix_part == mpart_Nothing) {
		r_marginals.SetZero();
		r_marginals.ExtendTo(r_R.n_Row_Num(), r_R.n_Column_Num()); // why bother?
		return;
	}
	// nothing to calculate

	/*{
		const size_t n = r_R.n_Column_Num(); // in elements
		r_marginals.resize(n, n);
	}*/

	const size_t n = r_R.n_BlockColumn_Num(); // in blocks!

	CUberBlockMatrix L;
	L.TransposeOf(r_R); // need transpose of R // todo - get rid of this, just make transpose ptr list, get all the layout info from R
	// note that the transpose takes less than 1% of time on linux for 300 x 300 sparse R (part of 10k)

	size_t n_first_block_col = 0; // all columns
	const size_t n_last_col = n - 1;
	if(!b_structure_of_R) {
		_ASSERTE(n_matrix_part == mpart_LastBlock);
		n_first_block_col = mord.p_Get_InverseOrdering()[n_last_col]; // only the last one (but it is not necessarily the last one in R)
	}
	// decide where to finish

	//printf("this is the correct blockwise Calculate_DenseMarginals_Recurrent_FBS()\n"); // debug

	_ASSERTE(r_R.b_SymmetricLayout()); // must be symmetric (any result of cholesky would be)
	// note that the notation in the below code is slightly confusing, as any column in R
	// is also a row in L and vice versa. there are also some transpose accesses where
	// row and column are switched. it is a mess, really.

	// note that R is not used in the below code. although layout of R might reside
	// in CPU cache, the (dis)advantage should be marginal. i prefer to write code
	// that can be directly used with L factor, should there be efficient means to
	// calculate that instead of R (not the case in SLAM++ so far).

	// the sparse version however requires also the structure of the transpose block matrix
	// that might mix the things up a bit (dense recursive is slower than the parallelizable
	// dense fast)

	//r_marginals.Clear(); // not required, the next line does that
	L.CopyLayoutTo(r_marginals);
	// prepare the output matrix for the marginals

	CSparseBlockMarginals_Recurrent_FBSKernel::TBlockMatrixLookup margs_lookup(r_marginals);
/*#ifdef _DEBUG
	margs_lookup.p_ground_truth = &r_marginals_gt; // set ground truth
#endif // _DEBUG*/

	Eigen::VectorXd inv_diag_L(L.n_Column_Num()); // vector for storing inverse diagonal of L
	for(size_t n_column_i = n_first_block_col; n_column_i < n; ++ n_column_i) {
		CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_column_i, 0);
		_ASSERTE(L.n_Block_Row(n_column_i, 0) == n_column_i); // make sure it is a diagonal block
		inv_diag_L.segment(L.n_BlockColumn_Base(n_column_i), cur_L_diag_block.cols()) =
			cur_L_diag_block.diagonal().array().inverse(); // don't re-read the data, invert it now
	}
	//inv_diag_L = inv_diag_L.array().inverse(); // long vector, but it re-reads the whole vector
	// gather and invert the diagonal of L (can be done in parallel or in fixed-size chunks)
	// todo - FBS inverse? (loop while block size is the same,
	// return to enter another decision tree only if it changes)

	// t_odo - add ordered index of the last column to the arguments of this function, handle it as L shape (the ordering breaks the column to an "L" where it goes below the diagonal)

	if(n_matrix_part == mpart_LastBlock) {
		_ASSERTE(!b_structure_of_R); // just the last block, right?
		size_t n_column_i = n - 1;
		size_t n_column_i_size = L.n_BlockColumn_Column_Num(n_column_i);
		CSparseBlockMarginals_Recurrent_FBSKernel::Run<CBlockMatrixTypelist>(
			n_column_i_size, n_column_i, L, r_R, margs_lookup/*r_marginals*/,
			inv_diag_L, 0); // 0 = diagonal only, 1 = copy structure of R, 2 = full column
		// FBS column width
	} else {
		for(size_t n_column_i = n; n_column_i > n_first_block_col;) {
			-- n_column_i; // here
			size_t n_column_i_size = L.n_BlockColumn_Column_Num(n_column_i);
			CSparseBlockMarginals_Recurrent_FBSKernel::Run<CBlockMatrixTypelist>(
				n_column_i_size, n_column_i, L, r_R, margs_lookup/*r_marginals*/,
				inv_diag_L, 1); // 0 = diagonal only, 1 = copy structure of R, 2 = full column
			// FBS column width
		}
		// calculate the structur of R first, up to n_first_block_col

		if(n_matrix_part == mpart_LastBlock) {
			// done, have the structure of R, up to n_first_block_col, which will get invpermuted to the last column
		} else if(n_matrix_part == mpart_FullMatrix) { // note that this could be a part of the below branch
			for(size_t n_column_i = n; n_column_i > n_first_block_col;) {
				-- n_column_i; // here
				size_t n_column_i_size = L.n_BlockColumn_Column_Num(n_column_i);
				CSparseBlockMarginals_Recurrent_FBSKernel::Run<CBlockMatrixTypelist>(
					n_column_i_size, n_column_i, L, r_R, margs_lookup/*r_marginals*/,
					inv_diag_L, 2); // 0 = diagonal only, 1 = copy structure of R, 2 = full column
				// FBS column width
			}
		} else if((n_matrix_part & mpart_LastColumn) == mpart_LastColumn) {
			if(mord.p_Get_InverseOrdering()[n_last_col] == n_last_col) {
				size_t n_last_col_size = L.n_BlockColumn_Column_Num(n_last_col);
				CSparseBlockMarginals_Recurrent_FBSKernel::Run<CBlockMatrixTypelist>(
					n_last_col_size, n_last_col, L, r_R, margs_lookup/*r_marginals*/,
					inv_diag_L, 2); // 0 = diagonal only, 1 = copy structure of R, 2 = full column
				// FBS column width
			} else {
				throw std::runtime_error("marginals: the last column not ordered last");
				// this is somehow broken, the marginals do not have the same values as
				// they should, don't have time to solve it now

				// NOTE(review): everything below this throw is unreachable dead code,
				// apparently kept as work-in-progress for the permuted-column case

				/*char p_s_filename[256];
				sprintf(p_s_filename, "margs_ordered_%04" _PRIsize ".tga", n);
				r_marginals.Rasterize(p_s_filename); // debug

				typename CUberBlockMatrix::CMakeMatrixRef<3, 3>::_TyConst
					block_i_k_mar(margs_lookup.p_GetExistingBlock(2, 3, 3, 3));*/ // this one is null and it is needed in calculating the column (11th vertex of molson)
				// more debug - this is nonzero but gets zeroed

				size_t n_above_col = mord.p_Get_InverseOrdering()[n_last_col];
				if(n_above_col) {
					size_t n_above_col_size = L.n_BlockColumn_Column_Num(n_last_col); // NOTE(review): n_last_col looks like it should be n_above_col here - TODO confirm (unreachable anyway, see the throw above)
					CSparseBlockMarginals_Recurrent_FBSKernel::Run<CBlockMatrixTypelist>(
						n_above_col_size, n_above_col, L, r_R, margs_lookup/*r_marginals*/,
						inv_diag_L, 2); // 0 = diagonal only, 1 = copy structure of R, 2 = full column
				}
				// do the part of the permuted column that is above the diagonal

				/*r_marginals.Rasterize(p_s_filename);*/ // debug

				const size_t n_row = n_above_col;
				for(size_t i = n; i > n_above_col;) {
					-- i;
					// todo - calculate a single block at (n_row, i)
				}
			}
		}
		// calculate the last column / full matrix using the structure of R (the last column does need the full structure of R, sadly)
	}

	//margs.Convert_to_Dense(r_marginals); // write back to a dense matrix

	/*CUberBlockMatrix margs;
	r_R.CopyLayoutTo(margs);
	{for(size_t i = 0, n = margs.n_BlockColumn_Num(); i < n; ++ i) {
		for(size_t j = 0; j < n; ++ j) {
			margs.t_GetBlock_Log(j, i, margs.n_BlockRow_Row_Num(j),
				margs.n_BlockColumn_Column_Num(i), true, false) =
				r_marginals.block(margs.n_BlockRow_Base(j), margs.n_BlockColumn_Base(i),
				margs.n_BlockRow_Row_Num(j), margs.n_BlockColumn_Column_Num(i));
		}
	}}
	margs.Rasterize("margs_covariance.tga");*/ // debug

	//r_marginals.triangularView<Eigen::StrictlyLower>() =
	//	r_marginals.triangularView<Eigen::StrictlyUpper>().transpose(); // mirror upper to lower
	// transpose elements below diagonal to elements above it
	// this is *crazy* slow, takes 8 seconds on 10k matrix
	// big todo - check all the code where triangularView() is used, see if it can be done better
}

/**
 * @brief reference function that calculates dense marginals matrix
 *
 * @param[out] r_marginals is filled with the marginals matrix
 * @param[in] r_R is the Cholesky factor
 *
 * @note This function throws std::bad_alloc.
 */
static void Calculate_DenseMarginals_Recurrent(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R) // throw(std::bad_alloc)
{
	{
		const size_t n = r_R.n_Column_Num(); // in elements
		r_marginals.resize(n, n);
	}
	//r_marginals.setZero(); // debug

	//r_R.Rasterize("margs_R.tga"); // debug

	const size_t n = r_R.n_BlockColumn_Num(); // in blocks!

	CUberBlockMatrix L;
	L.TransposeOf(r_R); // need transpose of R // todo - get rid of this, just make transpose ptr list, get all the layout info from R
	// note that the transpose takes less than 1% of time on linux for 300 x 300 sparse R (part of 10k)

	//printf("this is the correct blockwise Calculate_DenseMarginals_Recurrent()\n"); // debug

	_ASSERTE(r_R.b_SymmetricLayout()); // must be symmetric (any result of cholesky would be)
	// note that the notation in the below code is slightly confusing, as any column in R
	// is also a row in L and vice versa. there are also some transpose accesses where
	// row and column are switched. it is a mess, really.

	// note that R is not used in the below code. although layout of R might reside
	// in CPU cache, the (dis)advantage should be marginal. i prefer to write code
	// that can be directly used with L factor, should there be efficient means to
	// calculate that instead of R (not the case in SLAM++ so far).
for(size_t n_column_i = n; n_column_i > 0;) { -- n_column_i; // here size_t n_column_i_size = L.n_BlockColumn_Column_Num(n_column_i); size_t n_column_i_base = L.n_BlockColumn_Base(n_column_i); size_t n_column_i_block_num = L.n_BlockColumn_Block_Num(n_column_i); // t_odo - rename *block_i* to *column_i* // gets the corresponding block col { Eigen::Block<Eigen::MatrixXd> block_i_i_mar = r_marginals.block(n_column_i_base, n_column_i_base, n_column_i_size, n_column_i_size); // work with blocks also in the matrix of the marginals CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_column_i, 0); _ASSERTE(L.n_Block_Row(n_column_i, 0) == n_column_i); // make sure it is at the diagonal (L has symmetric layout) for(size_t n_elem = n_column_i_size; n_elem > 0;) { // go backwards -- n_elem; // here #if 0 // no extra code path for the diagonal element (it has slightly simpler addressing) double f_L_ii = cur_L_diag_block(n_elem, n_elem); double f_L_ii_inv = 1 / f_L_ii; // get the diagonal element { double f_diag_sum = 0; for(size_t j = 1; j < n_column_i_block_num; ++ j) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_Block_AtColumn(n_column_i, j); size_t n_row_j_size = block_i_j.rows(); size_t n_row_j_base = L.n_BlockRow_Base(L.n_Block_Row(n_column_i, j)); // look up the row in L _ASSERTE(L.n_BlockColumn_Column_Num(n_column_i) == n_column_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_column_i, j)) == n_row_j_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_j_mar = r_marginals.block(n_column_i_base, n_row_j_base, n_column_i_size, n_row_j_size); // get the corresponding block in the marginals (already calculated at this moment) // note that (n_row_j, n_column_i) would be lower diagonal, here we access // the transpose block accross the diagonal _ASSERTE(&block_i_j_mar.row(n_elem)(0) == 
&r_marginals.row(n_column_i_base + n_elem).segment(n_row_j_base, n_row_j_size)(0)); // make sure we got the addressing right _ASSERTE(n_row_j_base > n_column_i_base); // make sure the below dot product will sample uper diagonal of the marginals f_diag_sum += block_i_j_mar.row(n_elem).dot(block_i_j.col(n_elem)); // todo - _FBS it //f_diag_sum += r_marginals.row(n_column_i_base + n_elem).segment(n_row_j_base, // t_odo - try .block<1, Dynamic>(n_row_j_base, i, 0, n_row_j_size) or something like that? // n_row_j_size).dot(block_i_j.col(n_elem)); // add dot of one column of the block with span of the current column of the marginals // t_odo - blockify the access to r_marginals } { size_t n_row_j_size = n_column_i_size;//cur_L_diag_block.rows(); // also symmetric size_t n_row_j_base = n_column_i_base; // it is symmetric // look up the row in R (= column in L) size_t n_first_underdiag_elem = n_elem + 1; size_t n_underdiag_elem_num = n_column_i_size - n_first_underdiag_elem; _ASSERTE(n_elem + 1 == n_column_i_size || &block_i_i_mar.col(n_elem)(n_elem + 1) == &r_marginals.col(n_column_i_base + n_elem).segment(n_first_underdiag_elem + n_row_j_base, n_underdiag_elem_num)(0)); // make sure we got the addressing right f_diag_sum += block_i_i_mar.col(n_elem).segment(n_first_underdiag_elem, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem).tail(n_underdiag_elem_num)); //f_diag_sum += r_marginals.col(n_column_i_base + n_elem).segment(n_first_underdiag_elem + n_row_j_base, // n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem).tail( // n_row_j_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum // t_odo - blockify the access to r_marginals (through cur_L_diag_block? 
the info should be there) } block_i_i_mar(n_elem, n_elem) = f_L_ii_inv * (f_L_ii_inv - f_diag_sum); } // calculate the diagonal element for(size_t n_elem2 = n_elem; n_elem2 > 0;) { // for the rest of the elements above diagonal in this marginal block -- n_elem2; // here #else // 0 for(size_t n_elem2 = n_elem + 1; n_elem2 > 0;) { // for all the elements above or on diagonal in this marginal block -- n_elem2; // here #endif // 0 double f_L_jj = cur_L_diag_block(n_elem2, n_elem2); double f_L_jj_inv = 1 / f_L_jj; double f_diag_sum = 0; for(size_t j = 1; j < n_column_i_block_num; ++ j) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_Block_AtColumn(n_column_i, j); size_t n_row_j_size = block_i_j.rows(); size_t n_row_j_base = L.n_BlockRow_Base(L.n_Block_Row(n_column_i, j)); // look up the row in R (= column in L) _ASSERTE(L.n_BlockColumn_Column_Num(n_column_i) == n_column_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_column_i, j)) == n_row_j_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_j_mar = r_marginals.block(n_column_i_base, n_row_j_base, n_column_i_size, n_row_j_size); // get the corresponding block in the marginals (already calculated at this moment) // note that (n_row_j, n_column_i) would be lower diagonal, here we access // the transpose block accross the diagonal _ASSERTE(&block_i_j_mar.row(n_elem)(0) == &r_marginals.row(n_column_i_base + n_elem).segment(n_row_j_base, n_row_j_size)(0)); // make sure we got the addressing right _ASSERTE(n_row_j_base > n_column_i_base); // make sure the below dot product will sample uper diagonal of the marginals f_diag_sum += block_i_j_mar.row(n_elem).dot(block_i_j.col(n_elem2)); // todo - _FBS it //f_diag_sum += r_marginals.row(n_column_i_base + n_elem).segment(n_row_j_base, // t_odo - try .block<1, Dynamic>(n_row_j_base, i, 0, n_row_j_size) or something like that? 
// n_row_j_size).dot(block_i_j.col(n_elem2)); // t_odo - _FBS it // add dot of one column of the block with span of the current column of the marginals // t_odo - blockify the access to r_marginals } { size_t n_row_j_size = n_column_i_size;//cur_L_diag_block.rows(); // also symmetric size_t n_row_j_base = n_column_i_base; // it is symmetric // look up the row in R (= column in L) size_t n_first_underdiag_elem = n_elem2 + 1; size_t n_underdiag_elem_num = n_column_i_size - n_first_underdiag_elem; _ASSERTE(n_elem2 + 1 == n_column_i_size || &block_i_i_mar.col(n_elem)(n_elem2 + 1) == &r_marginals.col(n_column_i_base + n_elem).segment(n_first_underdiag_elem + n_row_j_base, n_underdiag_elem_num)(0)); // make sure we got the addressing right f_diag_sum += block_i_i_mar.col(n_elem).segment(n_first_underdiag_elem, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail(n_underdiag_elem_num)); // todo - _FBS it //f_diag_sum += r_marginals.col(n_column_i_base + n_elem).segment(n_first_underdiag_elem + n_row_j_base, // n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail( // n_row_j_size - n_first_underdiag_elem)); // complete the sum // t_odo - blockify the access to r_marginals (through cur_L_diag_block? the info should be there) } // note that this is NOT the same code as for the diagonal entries, the final modification, the writes and the addressing with n_elem2 are different // note that n_elem2 could be also introduced in the diagonal branch above and held equal to n_elem if(n_elem2 == n_elem) { block_i_i_mar(n_elem, n_elem) = f_L_jj_inv * (f_L_jj_inv - f_diag_sum); // diagonal elems have a different formula } else { block_i_i_mar(n_elem2, n_elem) = block_i_i_mar(n_elem, n_elem2) = f_L_jj_inv * -f_diag_sum; // use _jj! 
(different diagonal elem in the same block) } // this is not required, if the extra code path for the diagonal element is enabled // (only the else part is required then, the branch is avoided) /*_ASSERTE(&block_i_i_mar(n_elem2, n_elem) == &r_marginals(n_column_i_base + n_elem2, n_column_i_base + n_elem)); _ASSERTE(&block_i_i_mar(n_elem, n_elem2) == &r_marginals(n_column_i_base + n_elem, n_column_i_base + n_elem2)); _ASSERTE(r_marginals_gt(n_column_i_base + n_elem, n_column_i_base + n_elem2) == r_marginals_gt(n_column_i_base + n_elem2, n_column_i_base + n_elem)); // strong == ok here, should be symmetric _ASSERTE(fabs(r_marginals_gt(n_column_i_base + n_elem2, n_column_i_base + n_elem) - block_i_i_mar(n_elem2, n_elem)) < std::max(1e-5, 1e-5 * r_marginals_gt(n_column_i_base + n_elem2, n_column_i_base + n_elem)));*/ // compare to ground-truth } // calculate the off-diagonal elements inside this block } } // do the block at the diagonal for(size_t n_column_j = n_column_i; n_column_j > 0;) { // note that this is only required for the blocks that are required on output or blocks that are coincident with nnz blocks in the L or R factor -- n_column_j; // here size_t n_column_j_size = L.n_BlockColumn_Column_Num(n_column_j); size_t n_column_j_base = L.n_BlockColumn_Base(n_column_j); size_t n_column_j_block_num = L.n_BlockColumn_Block_Num(n_column_j); // t_odo - rename *block_j* to *column_j* // gets the corresponding block column _ASSERTE(n_column_j_base < n_column_i_base); // strictly upper even Eigen::Block<Eigen::MatrixXd> block_j_i_mar = r_marginals.block(n_column_j_base, n_column_i_base, n_column_j_size, n_column_i_size); // in upper triangular // t_odo rename this // work with blocks also in the matrix of the marginals CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_column_j, 0); // t_odo - rename those to have "L" in them _ASSERTE(L.n_Block_Row(n_column_j, 0) == n_column_j); // make sure it is at the diagonal (L has symmetric layout) 
block_j_i_mar.setZero(); for(size_t k = 1; k < n_column_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_Block_AtColumn(n_column_j, k); size_t n_row_k_size = block_j_k.rows(); size_t n_row_k_base = L.n_BlockRow_Base(L.n_Block_Row(n_column_j, k)); // look up the row in R (= column in L) _ASSERTE(L.n_BlockColumn_Column_Num(n_column_i) == n_column_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_column_j, k)) == n_row_k_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_k_mar = (n_row_k_base >= n_column_i_base)? r_marginals.block(n_column_i_base, n_row_k_base, n_column_i_size, n_row_k_size) : r_marginals.block(n_row_k_base, n_column_i_base, n_row_k_size, n_column_i_size); // get the corresponding block in the marginals (already calculated at this moment) if(n_row_k_base >= n_column_i_base) { // would be in lower diagonal // todo - this branch will be used for all subsequent blocks; make another loop here without the test, then break out from the outer loop _ASSERTE(&block_i_k_mar.row(/*n_elem*/0)(0) == &r_marginals.row(n_column_i_base + /*n_elem*/0).segment( n_row_k_base, n_row_k_size)(0) && (block_i_k_mar.rows() == 1 || &block_i_k_mar.row(/*n_elem*/1)(0) == &r_marginals.row(n_column_i_base + /*n_elem*/1).segment( n_row_k_base, n_row_k_size)(0))); // t_odo - incomplete check, need to check at least one more address if size is greater than 1 // make sure we got the addressing right (checks the first elem and if there is one, also the second elem) block_j_i_mar.noalias() += (block_i_k_mar * block_j_k).transpose(); // todo - try with lazyProduct(), try reverse order product and transpose the factors themselves //f_diag_sum += block_i_k_mar.row(n_elem).dot(block_j_k.col(n_elem2)); // t_odo - _FBS it //f_diag_sum += r_marginals.row(n_column_i_base + n_elem).segment(n_row_k_base, // 
n_row_k_size).dot(block_j_k.col(n_elem2)); } else { _ASSERTE(&block_i_k_mar.transpose().row(/*n_elem*/0)(0) == &r_marginals.col(n_column_i_base + /*n_elem*/0).segment( n_row_k_base, n_row_k_size)(0) && (block_i_k_mar.cols() == 1 || &block_i_k_mar.transpose().row(/*n_elem*/1)(0) == &r_marginals.col(n_column_i_base + /*n_elem*/1).segment( n_row_k_base, n_row_k_size)(0))); // t_odo - incomplete check, need to check at least one more address if size is greater than 1 // make sure we got the addressing right (checks the first elem and if there is one, also the second elem) block_j_i_mar.noalias() += block_j_k.transpose() * block_i_k_mar; // todo - try with lazyProduct() //f_diag_sum += block_i_k_mar.transpose().row(n_elem).dot(block_j_k.col(n_elem2)); // t_odo - _FBS it //f_diag_sum += r_marginals.col(n_column_i_base + n_elem).segment(n_row_k_base, // n_row_k_size).dot(block_j_k.col(n_elem2)); // t_odo - _FBS it } // add dot of one column of the block with span of the current column of the marginals } // calculate the first part of the sum in completely blockwise manner for(size_t n_elem = n_column_i_size; n_elem > 0;) { // go backwards -- n_elem; // here #if 1 // different code for the last row of the result block { size_t n_elem2 = n_column_j_size - 1; // the last diagonal double f_L_jj_inv = 1 / cur_L_diag_block(n_elem2, n_elem2); // get the diagonal block and element double f_diag_sum = block_j_i_mar(n_elem2, n_elem); // (calculated blockwise) // continue from here // no additional dot product, there is nothing below (would be size 0 dot product, complicated _FBS) block_j_i_mar(n_elem2, n_elem) = -f_diag_sum * f_L_jj_inv; // write only upper triangular, will mirror it at once (or not at all) /*_ASSERTE(&block_j_i_mar(n_elem2, n_elem) == &r_marginals(n_column_j_base + n_elem2, n_column_i_base + n_elem)); _ASSERTE(fabs(block_j_i_mar(n_elem2, n_elem) - r_marginals_gt(n_column_j_base + n_elem2, n_column_i_base + n_elem)) < std::max(1e-5, 1e-5 * 
r_marginals_gt(n_column_j_base + n_elem2, n_column_i_base + n_elem)));*/ // make sure it is correct } // the loop skips one elem for(size_t n_elem2 = n_column_j_size - 1; n_elem2 > 0;) { // go backwards #else // 1 for(size_t n_elem2 = n_column_j_size; n_elem2 > 0;) { // go backwards #endif // 1 -- n_elem2; // here double f_L_jj_inv = 1 / cur_L_diag_block(n_elem2, n_elem2); // get the diagonal block and element double f_diag_sum = block_j_i_mar(n_elem2, n_elem); // (calculated blockwise) // continue from here { size_t n_row_k_size = n_column_j_size;//cur_L_diag_block.rows(); // also symmetric size_t n_row_k_base = n_column_j_base; // it is symmetric // look up the row in R (= column in L) size_t n_first_underdiag_elem = n_elem2 + 1; size_t n_underdiag_elem_num = n_column_j_size - n_first_underdiag_elem; f_diag_sum += block_j_i_mar.col(n_elem).segment(n_first_underdiag_elem, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail(n_underdiag_elem_num)); // todo - _FBS it //f_diag_sum += r_marginals.col(n_column_i_base + n_elem).segment(n_first_underdiag_elem + // n_row_k_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail( // n_row_k_size - n_first_underdiag_elem)); // t_odo - _FBS it // complete the sum } // this is complicated in block approach (maybe not, maybe it is just the task for a triangular view) block_j_i_mar(n_elem2, n_elem) = -f_diag_sum * f_L_jj_inv; // write only upper triangular, will mirror it at once (or not at all) /*_ASSERTE(&block_j_i_mar(n_elem2, n_elem) == &r_marginals(n_column_j_base + n_elem2, n_column_i_base + n_elem)); _ASSERTE(fabs(block_j_i_mar(n_elem2, n_elem) - r_marginals_gt(n_column_j_base + n_elem2, n_column_i_base + n_elem)) < std::max(1e-5, 1e-5 * r_marginals_gt(n_column_j_base + n_elem2, n_column_i_base + n_elem)));*/ // make sure it is correct } } // t_odo - get rid of this, do it in blockwise manner //block_j_i_mar = block_j_i_mar/*.triangularView<Eigen::StrictlyLower>()*/ * // 
cur_L_diag_block.triangularView<Eigen::StrictlyLower>().transpose(); // does not work, will not work, have to do it blockwise // t_odo - separate the two loops below, see how these are interdependent // the triangular part can't be done in blockwise manner as the order of elementwise // calculation is important } } /*CUberBlockMatrix margs; r_R.CopyLayoutTo(margs); {for(size_t i = 0, n = margs.n_BlockColumn_Num(); i < n; ++ i) { for(size_t j = 0; j < n; ++ j) { margs.t_GetBlock_Log(j, i, margs.n_BlockRow_Row_Num(j), margs.n_BlockColumn_Column_Num(i), true, false) = r_marginals.block(margs.n_BlockRow_Base(j), margs.n_BlockColumn_Base(i), margs.n_BlockRow_Row_Num(j), margs.n_BlockColumn_Column_Num(i)); } }} margs.Rasterize("margs_covariance.tga");*/ // debug r_marginals.triangularView<Eigen::StrictlyLower>() = r_marginals.triangularView<Eigen::StrictlyUpper>().transpose(); // mirror upper to lower // transpose elements below diagonal to elements above it } /** * @brief development version of the recurrent function that calculates dense marginals matrix * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_marginals_gt is the marginals matrix ground truth * @param[in] r_R is the Cholesky factor * * @note This function throws std::bad_alloc. */ static void Calculate_DenseMarginals_Recurrent_Devel(Eigen::MatrixXd &r_marginals, const Eigen::MatrixXd &r_marginals_gt, const CUberBlockMatrix &r_R) // throw(std::bad_alloc) { { const size_t n = r_R.n_Column_Num(); // in elements r_marginals.resize(n, n); } const size_t n = r_R.n_BlockColumn_Num(); // in blocks! 
CUberBlockMatrix L; L.TransposeOf(r_R); // todo - get rid of this, just make transpose ptr list, get all the layout info from R // need transpose of R //printf("this is the correct blockwise Calculate_DenseMarginals_Recurrent()\n"); // debug _ASSERTE(r_R.b_SymmetricLayout()); // must be sym for(size_t n_block_i = n; n_block_i > 0;) { -- n_block_i; // here size_t n_block_i_size = L.n_BlockColumn_Column_Num(n_block_i); size_t n_block_i_base = L.n_BlockColumn_Base(n_block_i); size_t n_block_i_block_num = L.n_BlockColumn_Block_Num(n_block_i); // todo - rename *block_i* to *column_i* // gets the corresponding block col { Eigen::Block<Eigen::MatrixXd> dest_block = r_marginals.block(n_block_i_base, n_block_i_base, n_block_i_size, n_block_i_size); // work with blocks also in the matrix of the marginals CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_i, 0); _ASSERTE(L.n_Block_Row(n_block_i, 0) == n_block_i); // make sure it is at the diagonal (L has symmetric layout) for(size_t n_elem = n_block_i_size; n_elem > 0;) { // go backwards -- n_elem; // here double f_L_ii = cur_L_diag_block(n_elem, n_elem); double f_L_ii_inv = 1 / f_L_ii; // get the diagonal element { double f_diag_sum = 0; for(size_t j = 1; j < n_block_i_block_num; ++ j) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_Block_AtColumn(n_block_i, j); size_t n_block_row_size = block_i_j.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_i, j)); // todo - rename this to col // look up the row in R (= column in L) _ASSERTE(L.n_BlockColumn_Column_Num(n_block_i) == n_block_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_block_i, j)) == n_block_row_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_j_mar = r_marginals.block(n_block_i_base, n_block_row_base, n_block_i_size, n_block_row_size); // get the corresponding block in 
the marginals (already calculated at this moment) _ASSERTE(&block_i_j_mar.row(n_elem)(0) == &r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, n_block_row_size)(0)); // make sure we got the addressing right _ASSERTE(n_block_row_base > n_block_i_base); // make sure the below dot product will sample uper diagonal of the marginals f_diag_sum += block_i_j_mar.row(n_elem).dot(block_i_j.col(n_elem)); // todo - _FBS it //f_diag_sum += r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, // t_odo - try .block<1, Dynamic>(n_block_row_base, i, 0, n_block_row_size) or something like that? // n_block_row_size).dot(block_i_j.col(n_elem)); // add dot of one column of the block with span of the current column of the marginals // t_odo - blockify the access to r_marginals } { size_t n_block_row_size = n_block_i_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_i_base; // it is symmetric // look up the row in R (= column in L) size_t n_first_underdiag_elem = n_elem + 1; size_t n_underdiag_elem_num = n_block_i_size - n_first_underdiag_elem; _ASSERTE(n_elem + 1 == n_block_i_size || &dest_block.col(n_elem)(n_elem + 1) == &r_marginals.col(n_block_i_base + n_elem).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num)(0)); // make sure we got the addressing right f_diag_sum += dest_block.col(n_elem).segment(n_first_underdiag_elem, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem).tail(n_underdiag_elem_num)); //f_diag_sum += r_marginals.col(n_block_i_base + n_elem).segment(n_first_underdiag_elem + n_block_row_base, // n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem).tail( // n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum // t_odo - blockify the access to r_marginals (through cur_L_diag_block? 
the info should be there) } dest_block(n_elem, n_elem) = f_L_ii_inv * (f_L_ii_inv - f_diag_sum); } // calculate the diagonal element _ASSERTE(&dest_block(n_elem, n_elem) == &r_marginals(n_block_i_base + n_elem, n_block_i_base + n_elem)); _ASSERTE(fabs(r_marginals_gt(n_block_i_base + n_elem, n_block_i_base + n_elem) - dest_block(n_elem, n_elem)) < std::max(1e-5, 1e-5 * r_marginals_gt(n_block_i_base + n_elem, n_block_i_base + n_elem))); // compare to ground-truth for(size_t n_elem2 = n_elem; n_elem2 > 0;) { // for the rest of the elements above diagonal in this marginal block -- n_elem2; // here double f_L_jj = cur_L_diag_block(n_elem2, n_elem2); double f_L_jj_inv = 1 / f_L_jj; double f_diag_sum = 0; for(size_t j = 1; j < n_block_i_block_num; ++ j) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_Block_AtColumn(n_block_i, j); size_t n_block_row_size = block_i_j.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_i, j)); // look up the row in R (= column in L) _ASSERTE(L.n_BlockColumn_Column_Num(n_block_i) == n_block_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_block_i, j)) == n_block_row_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_j_mar = r_marginals.block(n_block_i_base, n_block_row_base, n_block_i_size, n_block_row_size); // get the corresponding block in the marginals (already calculated at this moment) _ASSERTE(&block_i_j_mar.row(n_elem)(0) == &r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, n_block_row_size)(0)); // make sure we got the addressing right _ASSERTE(n_block_row_base > n_block_i_base); // make sure the below dot product will sample uper diagonal of the marginals f_diag_sum += block_i_j_mar.row(n_elem).dot(block_i_j.col(n_elem2)); // todo - _FBS it //f_diag_sum += r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, // todo - try .block<1, 
Dynamic>(n_block_row_base, i, 0, n_block_row_size) or something like that? // n_block_row_size).dot(block_i_j.col(n_elem2)); // todo - _FBS it // add dot of one column of the block with span of the current column of the marginals // todo - blockify the access to r_marginals } { size_t n_block_row_size = n_block_i_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_i_base; // it is symmetric // look up the row in R (= column in L) size_t n_first_underdiag_elem = n_elem2 + 1; size_t n_underdiag_elem_num = n_block_i_size - n_first_underdiag_elem; _ASSERTE(n_elem2 + 1 == n_block_i_size || &dest_block.col(n_elem)(n_elem2 + 1) == &r_marginals.col(n_block_i_base + n_elem).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num)(0)); // make sure we got the addressing right f_diag_sum += dest_block.col(n_elem).segment(n_first_underdiag_elem, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail(n_underdiag_elem_num)); // todo - _FBS it //f_diag_sum += r_marginals.col(n_block_i_base + n_elem).segment(n_first_underdiag_elem + n_block_row_base, // n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail( // n_block_row_size - n_first_underdiag_elem)); // complete the sum // t_odo - blockify the access to r_marginals (through cur_L_diag_block? the info should be there) } // note that this is NOT the same code as for the diagonal entries, the final modification, the writes and the addressing with n_elem2 are different // note that n_elem2 could be also introduced in the diagonal branch above and held equal to n_elem dest_block(n_elem2, n_elem) = dest_block(n_elem, n_elem2) = f_L_jj_inv * -f_diag_sum; // use _jj! 
(different diagonal elem in the same block) _ASSERTE(&dest_block(n_elem2, n_elem) == &r_marginals(n_block_i_base + n_elem2, n_block_i_base + n_elem)); _ASSERTE(&dest_block(n_elem, n_elem2) == &r_marginals(n_block_i_base + n_elem, n_block_i_base + n_elem2)); _ASSERTE(r_marginals_gt(n_block_i_base + n_elem, n_block_i_base + n_elem2) == r_marginals_gt(n_block_i_base + n_elem2, n_block_i_base + n_elem)); // strong == ok here, should be symmetric _ASSERTE(fabs(r_marginals_gt(n_block_i_base + n_elem2, n_block_i_base + n_elem) - dest_block(n_elem2, n_elem)) < std::max(1e-5, 1e-5 * r_marginals_gt(n_block_i_base + n_elem2, n_block_i_base + n_elem))); // compare to ground-truth } // calculate the off-diagonal elements inside this block } } // do the block at the diagonal for(size_t n_block_j = n_block_i; n_block_j > 0;) { // note that this is only required for the blocks that are required on output or blocks that are coincident with nnz blocks in the L or R factor -- n_block_j; // here size_t n_block_j_size = L.n_BlockColumn_Column_Num(n_block_j); size_t n_block_j_base = L.n_BlockColumn_Base(n_block_j); size_t n_block_j_block_num = L.n_BlockColumn_Block_Num(n_block_j); // todo - rename *block_j* to *row_j* // gets the corresponding block row _ASSERTE(n_block_j_base < n_block_i_base); // strictly upper even Eigen::Block<Eigen::MatrixXd> dest_block = r_marginals.block(n_block_j_base, n_block_i_base, n_block_j_size, n_block_i_size); // in upper triangular // todo rename this // work with blocks also in the matrix of the marginals CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_j, 0); // todo - rename those to have "L" in them _ASSERTE(L.n_Block_Row(n_block_j, 0) == n_block_j); // make sure it is at the diagonal (L has symmetric layout) #if 0 Eigen::MatrixXd result_block(n_block_j_size, n_block_i_size); // t_odo - get rid of this, can do it inplace in dest_block #else // 0 Eigen::Block<Eigen::MatrixXd> &result_block = dest_block; // now it is the 
same thing #endif // 0 result_block.setZero(); // to contain data of blockwise calculation (for comparison) for(size_t k = 1; k < n_block_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_Block_AtColumn(n_block_j, k); size_t n_block_row_size = block_j_k.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_j, k)); // look up the row in R (= column in L) _ASSERTE(L.n_BlockColumn_Column_Num(n_block_i) == n_block_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_block_j, k)) == n_block_row_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_k_mar = (n_block_row_base >= n_block_i_base)? r_marginals.block(n_block_i_base, n_block_row_base, n_block_i_size, n_block_row_size) : r_marginals.block(n_block_row_base, n_block_i_base, n_block_row_size, n_block_i_size); // get the corresponding block in the marginals (already calculated at this moment) if(n_block_row_base >= n_block_i_base) { // would be in lower diagonal // todo - this branch will be used for all subsequent blocks; make another loop here without the test, then break out from the outer loop _ASSERTE(&block_i_k_mar.row(/*n_elem*/0)(0) == &r_marginals.row(n_block_i_base + /*n_elem*/0).segment( n_block_row_base, n_block_row_size)(0) && (block_i_k_mar.rows() == 1 || &block_i_k_mar.row(/*n_elem*/1)(0) == &r_marginals.row(n_block_i_base + /*n_elem*/1).segment( n_block_row_base, n_block_row_size)(0))); // t_odo - incomplete check, need to check at least one more address if size is greater than 1 // make sure we got the addressing right (checks the first elem and if there is one, also the second elem) result_block.noalias() += (block_i_k_mar * block_j_k).transpose(); // todo - try with lazyProduct(), try reverse order product and transpose the factors themselves //f_diag_sum += block_i_k_mar.row(n_elem).dot(block_j_k.col(n_elem2)); // todo - _FBS 
it //f_diag_sum += r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, // n_block_row_size).dot(block_j_k.col(n_elem2)); } else { _ASSERTE(&block_i_k_mar.transpose().row(/*n_elem*/0)(0) == &r_marginals.col(n_block_i_base + /*n_elem*/0).segment( n_block_row_base, n_block_row_size)(0) && (block_i_k_mar.cols() == 1 || &block_i_k_mar.transpose().row(/*n_elem*/1)(0) == &r_marginals.col(n_block_i_base + /*n_elem*/1).segment( n_block_row_base, n_block_row_size)(0))); // t_odo - incomplete check, need to check at least one more address if size is greater than 1 // make sure we got the addressing right (checks the first elem and if there is one, also the second elem) result_block.noalias() += block_j_k.transpose() * block_i_k_mar; // todo - try with lazyProduct() //f_diag_sum += block_i_k_mar.transpose().row(n_elem).dot(block_j_k.col(n_elem2)); // todo - _FBS it //f_diag_sum += r_marginals.col(n_block_i_base + n_elem).segment(n_block_row_base, // n_block_row_size).dot(block_j_k.col(n_elem2)); // todo - _FBS it } // add dot of one column of the block with span of the current column of the marginals } // calculate the first part of the sum in completely blockwise manner for(size_t n_elem = n_block_i_size; n_elem > 0;) { // go backwards -- n_elem; // here #if 1 // different code for the last row of the result block { size_t n_elem2 = n_block_j_size - 1; // the last diagonal double f_L_jj_inv = 1 / cur_L_diag_block(n_elem2, n_elem2); // get the diagonal block and element double f_diag_sum = result_block(n_elem2, n_elem); // (calculated blockwise) // continue from here // no additional dot product, there is nothing below (would be size 0 dot product, complicated _FBS) dest_block(n_elem2, n_elem) = -f_diag_sum * f_L_jj_inv; // write only upper triangular, will mirror it at once (or not at all) _ASSERTE(&dest_block(n_elem2, n_elem) == &r_marginals(n_block_j_base + n_elem2, n_block_i_base + n_elem)); _ASSERTE(fabs(dest_block(n_elem2, n_elem) - 
r_marginals_gt(n_block_j_base + n_elem2, n_block_i_base + n_elem)) < std::max(1e-5, 1e-5 * r_marginals_gt(n_block_j_base + n_elem2, n_block_i_base + n_elem))); // make sure it is correct } // the loop skips one elem for(size_t n_elem2 = n_block_j_size - 1; n_elem2 > 0;) { // go backwards #else // 1 for(size_t n_elem2 = n_block_j_size; n_elem2 > 0;) { // go backwards #endif // 1 -- n_elem2; // here double f_L_jj_inv = 1 / cur_L_diag_block(n_elem2, n_elem2); // get the diagonal block and element #if 0 // calculates the result elementwise, compares to result_block (calculated blockwise) _ASSERTE(dest_block.data() != result_block.data()); // if this triggers, need to change the above ifdef, containing declaration of result_block accordingly double f_diag_sum = 0; // this is a matrix (!) now for(size_t k = 1; k < n_block_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_Block_AtColumn(n_block_j, k); size_t n_block_row_size = block_j_k.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_j, k)); // look up the row in R (= column in L) _ASSERTE(L.n_BlockColumn_Column_Num(n_block_i) == n_block_i_size); _ASSERTE(L.n_BlockColumn_Column_Num(L.n_Block_Row(n_block_j, k)) == n_block_row_size); // symmetric layout, cols and rows interchangeable // make sure such block can exist in L (or R) Eigen::Block<Eigen::MatrixXd> block_i_k_mar = (n_block_row_base >= n_block_i_base)? 
r_marginals.block(n_block_i_base, n_block_row_base, n_block_i_size, n_block_row_size) : r_marginals.block(n_block_row_base, n_block_i_base, n_block_row_size, n_block_i_size); // get the corresponding block in the marginals (already calculated at this moment) if(n_block_row_base >= n_block_i_base) { // would be in lower diagonal // todo - this branch will be used for all subsequent blocks; make another loop here without the test, then break out from the outer loop _ASSERTE(&block_i_k_mar.row(n_elem)(0) == &r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, n_block_row_size)(0)); // make sure we got the addressing right f_diag_sum += block_i_k_mar.row(n_elem).dot(block_j_k.col(n_elem2)); // todo - _FBS it //f_diag_sum += r_marginals.row(n_block_i_base + n_elem).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, j, 0, n_block_row_size) or something like that? // n_block_row_size).dot(block_j_k.col(n_elem2)); } else { _ASSERTE(&block_i_k_mar.transpose().row(n_elem)(0) == &r_marginals.col(n_block_i_base + n_elem).segment(n_block_row_base, n_block_row_size)(0)); // make sure we got the addressing right f_diag_sum += block_i_k_mar.transpose().row(n_elem).dot(block_j_k.col(n_elem2)); // todo - _FBS it //f_diag_sum += r_marginals.col(n_block_i_base + n_elem).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, j, 0, n_block_row_size) or something like that? 
// n_block_row_size).dot(block_j_k.col(n_elem2)); // todo - _FBS it } // add dot of one column of the block with span of the current column of the marginals } // t_odo - access marginals by blocks before moving on _ASSERTE(fabs(f_diag_sum - result_block(n_elem2, n_elem)) < std::max(1e-5, 1e-5 * f_diag_sum)); // check blockwise calculation #else // 0 double f_diag_sum = result_block(n_elem2, n_elem); // (calculated blockwise) // continue from here #endif // 0 { size_t n_block_row_size = n_block_j_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_j_base; // it is symmetric // look up the row in R (= column in L) size_t n_first_underdiag_elem = n_elem2 + 1; size_t n_underdiag_elem_num = n_block_j_size - n_first_underdiag_elem; f_diag_sum += dest_block.col(n_elem).segment(n_first_underdiag_elem, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail(n_underdiag_elem_num)); // todo - _FBS it //f_diag_sum += r_marginals.col(n_block_i_base + n_elem).segment(n_first_underdiag_elem + // n_block_row_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_elem2).tail( // n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum // todo - this reuses the values of the diagonal block computed above } // this is complicated in block approach (maybe not, maybe it is just the task for a triangular view) dest_block(n_elem2, n_elem) = -f_diag_sum * f_L_jj_inv; // write only upper triangular, will mirror it at once (or not at all) _ASSERTE(&dest_block(n_elem2, n_elem) == &r_marginals(n_block_j_base + n_elem2, n_block_i_base + n_elem)); _ASSERTE(fabs(dest_block(n_elem2, n_elem) - r_marginals_gt(n_block_j_base + n_elem2, n_block_i_base + n_elem)) < std::max(1e-5, 1e-5 * r_marginals_gt(n_block_j_base + n_elem2, n_block_i_base + n_elem))); // make sure it is correct } } // todo - get rid of this, do it in blockwise manner //result_block = dest_block/*.triangularView<Eigen::StrictlyLower>()*/ * // 
cur_L_diag_block.triangularView<Eigen::StrictlyLower>().transpose(); // does not work, will not work // todo - separate the two loops below, see how these are interdependent // the triangular part can't be done in blockwise manner as the order of elementwise // calculation is important } continue; // this is the end of a single block column process #if 0 { for(size_t j = i; j > 0;) { // note that this is only required for the elements that are required on output or elements that are above nnz in the L or R factor -- j; // j is i in the book // i is k in the book size_t n_block_j_size; size_t n_block_j = L.n_Find_BlockColumn(j, n_block_j_size); size_t n_block_j_base = L.n_BlockColumn_Base(n_block_j); size_t n_block_j_block_num = L.n_BlockColumn_Block_Num(n_block_j); CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_j, 0); double f_L_jj = cur_L_diag_block(j - n_block_j_base, j - n_block_j_base); double f_L_jj_inv = 1 / f_L_jj; // get the diagonal block and element double f_diag_sum = 0; for(size_t k = 1; k < n_block_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_Block_AtColumn(n_block_j, k); size_t n_block_row_size = block_j_k.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_j, k)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, j, 0, n_block_row_size) or something like that? 
n_block_row_size).dot(block_j_k.col(j - n_block_j_base)); // todo - _FBS it // add dot of one column of the block with span of the current column of the marginals } { size_t n_block_row_size = n_block_j_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_j_base; // it is symmetric // look up the row in R (= column in L) size_t n_subblock_col = j - n_block_j_base; size_t n_first_underdiag_elem = n_subblock_col + 1; size_t n_underdiag_elem_num = n_block_j_size - n_first_underdiag_elem; f_diag_sum += r_marginals.row(i).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_subblock_col).tail( n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum } #if 0 && defined(_DEBUG) double f_sum_Rjk_Cik = 0;//, f_sum_part0, f_sum_first_elem; for(size_t k = j + 1; k < n; ++ k) { size_t n_block_k_size; size_t n_block_k = L.n_Find_BlockColumn(k, n_block_k_size); size_t n_block_k_base = L.n_BlockColumn_Base(n_block_k); CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_GetBlock_Log(n_block_k, n_block_j); if(!block_j_k.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks f_sum_Rjk_Cik += r_marginals(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // product } _ASSERTE(fabs(f_sum_Rjk_Cik - f_diag_sum) < std::max(1e-5, fabs(f_sum_Rjk_Cik))); // use the "old" code to verify corectness #endif // 0 && _DEBUG /*{ // debugging of more recurrent formula size_t m = i + 1; // wha? 
size_t n_block_m_size; size_t n_block_m = R.n_Find_BlockColumn(m, n_block_m_size); size_t n_block_m_base = R.n_BlockColumn_Base(n_block_m); CUberBlockMatrix::_TyMatrixXdRef diag_block_m = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_m) - 1, n_block_m); double f_R_mm = cur_L_diag_block(m - n_block_m_base, m - n_block_m_base); double f_sum_part1 = -(C_dep(m, m) * f_R_mm - 1 / f_R_mm); double f_sum_parted = f_sum_part0 + f_sum_part1; double f_sum_err = fabs(f_sum_parted - f_sum_Rjk_Cik); }*/ /*CUberBlockMatrix::_TyMatrixXdRef block_j_j = L.t_Block_AtColumn(n_block_j, L.n_BlockColumn_Block_Num(n_block_j) - 1); double L_j_j = block_j_j(j - n_block_j_base, j - n_block_j_base); // get another diagonal element of R*/ // unused r_marginals(i, j) = r_marginals(j, i) = -f_diag_sum * f_L_jj_inv; // write only upper triangular, will mirror it at once (or not at all) // todo - see to it that only upper triangular is accessed, add _ASSERTE() where reading it to make sure lower is not touched } // calculate C_dep_{n - 1 - i} by recurrent formula } #endif // 0 } //r_marginals.triangularView<Eigen::StrictlyUpper>() = // r_marginals.triangularView<Eigen::StrictlyLower>().transpose(); // mirror lower to upper r_marginals.triangularView<Eigen::StrictlyLower>() = r_marginals.triangularView<Eigen::StrictlyUpper>().transpose(); // mirror upper to lower // transpose elements below diagonal to elements above it } /** * @brief recurrent elementwise function that calculates dense marginals matrix * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_R is the Cholesky factor * * @note This function throws std::bad_alloc. 
*/ static void Calculate_DenseMarginals_Recurrent_Elemwise(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R) // throw(std::bad_alloc) { const size_t n = r_R.n_Column_Num(); // in elements r_marginals.resize(n, n); CUberBlockMatrix L; L.TransposeOf(r_R); // need transpose of R //printf("this is the correct Calculate_DenseMarginals_Recurrent()\n"); // debug for(size_t i = n; i > 0;) { -- i; // here /*if(i == n - 1) { // the last row / col is easy, as it has no refs to the next ones Eigen::MatrixXd::ColXpr last_col = r_marginals.col(n - 1); size_t lb = r_R.n_BlockColumn_Num() - 1; CUberBlockMatrix::_TyMatrixXdRef last_block = r_R.t_Block_AtColumn(lb, r_R.n_BlockColumn_Block_Num(lb) - 1); last_col.setZero(); // !! last_col(n - 1) = 1 / last_block(last_block.rows() - 1, last_block.cols() - 1); r_R.UpperTriangular_Solve(&last_col(0), n); // all columns needed // calculates the whole last column of C r_marginals.row(n - 1).head(n - 1) = r_marginals.col(n - 1).head(n - 1).transpose(); // copy that also to the last row, to form the full matrix and not just upper-triangular } else*/ // the code above is simple, but the usolve is quite expensive { // columns with references to the subsequent columns size_t n_block_column_size; size_t n_block_column = L.n_Find_BlockColumn(i, n_block_column_size); size_t n_block_column_base = L.n_BlockColumn_Base(n_block_column); size_t n_block_column_block_num = L.n_BlockColumn_Block_Num(n_block_column); // gets the corresponding block col (can use decrementing strategy like in C_direct) { CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_column, 0); double f_L_ii = cur_L_diag_block(i - n_block_column_base, i - n_block_column_base); double f_L_ii_inv = 1 / f_L_ii; // get the diagonal element double f_diag_sum = 0; for(size_t j = 1; j < n_block_column_block_num; ++ j) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_Block_AtColumn(n_block_column, j); size_t 
n_block_row_size = block_i_j.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_column, j)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, i, 0, n_block_row_size) or something like that? n_block_row_size).dot(block_i_j.col(i - n_block_column_base)); // todo - _FBS it // add dot of one column of the block with span of the current column of the marginals } { size_t n_block_row_size = n_block_column_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_column_base; // it is symmetric // look up the row in R (= column in L) size_t n_subblock_col = i - n_block_column_base; size_t n_first_underdiag_elem = n_subblock_col + 1; size_t n_underdiag_elem_num = n_block_column_size - n_first_underdiag_elem; f_diag_sum += r_marginals.row(i).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_subblock_col).tail( n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum //for(size_t j = i - n_block_column_base + 1; j < n_block_column_size; ++ j) // f_diag_sum += r_marginals(i, j + n_block_row_base) * cur_L_diag_block(j, i - n_block_column_base); // t_odo - make this a dot product on spans as well } #if 0 && defined(_DEBUG) double f_ref_diag_sum = 0; for(size_t j = i + 1; j < n; ++ j) { size_t n_block_row_size; size_t n_block_row = r_R.n_Find_BlockColumn(j, n_block_row_size); // R has symmetric layout size_t n_block_row_base = r_R.n_BlockColumn_Base(n_block_row); // look up the row in R (= column in L) CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_GetBlock_Log(n_block_row, n_block_column); if(!block_i_j.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks instead of looking up all blocks in dense manner double R_i_j = block_i_j(j - n_block_row_base, i - n_block_column_base); f_ref_diag_sum += 
r_marginals(i, j) * R_i_j; // this is bad, accesses the matrix by rows (need transpose) } _ASSERTE(fabs(f_ref_diag_sum - f_diag_sum) < std::max(1e-5, fabs(f_ref_diag_sum))); #endif // 0 && _DEBUG r_marginals(i, i) = f_L_ii_inv * (f_L_ii_inv - f_diag_sum); // calculate the diagonal element } for(size_t j = i; j > 0;) { // note that this is only required for the elements that are required on output or elements that are above nnz in the L or R factor -- j; // j is i in the book // i is k in the book size_t n_block_j_size; size_t n_block_j = L.n_Find_BlockColumn(j, n_block_j_size); size_t n_block_j_base = L.n_BlockColumn_Base(n_block_j); size_t n_block_j_block_num = L.n_BlockColumn_Block_Num(n_block_j); CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_j, 0); double f_L_jj = cur_L_diag_block(j - n_block_j_base, j - n_block_j_base); double f_L_jj_inv = 1 / f_L_jj; // get the diagonal block and element double f_diag_sum = 0; for(size_t k = 1; k < n_block_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_Block_AtColumn(n_block_j, k); size_t n_block_row_size = block_j_k.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_j, k)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, j, 0, n_block_row_size) or something like that? 
n_block_row_size).dot(block_j_k.col(j - n_block_j_base)); // todo - _FBS it // add dot of one column of the block with span of the current column of the marginals } { size_t n_block_row_size = n_block_j_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_j_base; // it is symmetric // look up the row in R (= column in L) size_t n_subblock_col = j - n_block_j_base; size_t n_first_underdiag_elem = n_subblock_col + 1; size_t n_underdiag_elem_num = n_block_j_size - n_first_underdiag_elem; f_diag_sum += r_marginals.row(i).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_subblock_col).tail( n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum } #if 0 && defined(_DEBUG) double f_sum_Rjk_Cik = 0;//, f_sum_part0, f_sum_first_elem; for(size_t k = j + 1; k < n; ++ k) { size_t n_block_k_size; size_t n_block_k = L.n_Find_BlockColumn(k, n_block_k_size); size_t n_block_k_base = L.n_BlockColumn_Base(n_block_k); CUberBlockMatrix::_TyConstMatrixXdRef block_j_k = L.t_GetBlock_Log(n_block_k, n_block_j); if(!block_j_k.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks f_sum_Rjk_Cik += r_marginals(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // product } _ASSERTE(fabs(f_sum_Rjk_Cik - f_diag_sum) < std::max(1e-5, fabs(f_sum_Rjk_Cik))); // use the "old" code to verify corectness #endif // 0 && _DEBUG /*{ // debugging of more recurrent formula size_t m = i + 1; // wha? 
size_t n_block_m_size; size_t n_block_m = R.n_Find_BlockColumn(m, n_block_m_size); size_t n_block_m_base = R.n_BlockColumn_Base(n_block_m); CUberBlockMatrix::_TyConstMatrixXdRef diag_block_m = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_m) - 1, n_block_m); double f_R_mm = cur_L_diag_block(m - n_block_m_base, m - n_block_m_base); double f_sum_part1 = -(C_dep(m, m) * f_R_mm - 1 / f_R_mm); double f_sum_parted = f_sum_part0 + f_sum_part1; double f_sum_err = fabs(f_sum_parted - f_sum_Rjk_Cik); }*/ /*CUberBlockMatrix::_TyConstMatrixXdRef block_j_j = L.t_Block_AtColumn(n_block_j, L.n_BlockColumn_Block_Num(n_block_j) - 1); double L_j_j = block_j_j(j - n_block_j_base, j - n_block_j_base); // get another diagonal element of R*/ // unused r_marginals(i, j) = r_marginals(j, i) = -f_diag_sum * f_L_jj_inv; // write only upper triangular, will mirror it at once (or not at all) // todo - see to it that only upper triangular is accessed, add _ASSERTE() where reading it to make sure lower is not touched } // calculate C_dep_{n - 1 - i} by recurrent formula } } //r_marginals.triangularView<Eigen::StrictlyUpper>() = // r_marginals.triangularView<Eigen::StrictlyLower>().transpose(); // mirror lower to upper //r_marginals.triangularView<Eigen::StrictlyLower>() = // r_marginals.triangularView<Eigen::StrictlyUpper>().transpose(); // mirror upper to lower // transpose elements below diagonal to elements above it } /** * @brief recurrent elementwise FBS function that calculates dense marginals matrix * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_R is the Cholesky factor * * @note This function throws std::bad_alloc. 
*/ template <class CBlockMatrixTypelist> static void Calculate_DenseMarginals_Recurrent_Elemwise_FBS(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R) // throw(std::bad_alloc) { const size_t n = r_R.n_Column_Num(); // in elements r_marginals.resize(n, n); CUberBlockMatrix L; L.TransposeOf(r_R); // need transpose of R enum { b_single_block_size = CTypelistLength<CBlockMatrixTypelist>::n_result == 1, n_first_block_size = fbs_ut::CEigenToDimension<typename CBlockMatrixTypelist::_TyHead>::_TyResult::n_column_num }; // optimize for just a single size in the typelist (compile-time constants) //printf("this is the FBS version of the correct Calculate_DenseMarginals_Recurrent()\n"); // debug for(size_t i = n; i > 0;) { -- i; // here /*if(i == n - 1) { // the last row / col is easy, as it has no refs to the next ones Eigen::MatrixXd::ColXpr last_col = r_marginals.col(n - 1); size_t lb = r_R.n_BlockColumn_Num() - 1; CUberBlockMatrix::_TyMatrixXdRef last_block = r_R.t_Block_AtColumn(lb, r_R.n_BlockColumn_Block_Num(lb) - 1); last_col.setZero(); // !! 
last_col(n - 1) = 1 / last_block(last_block.rows() - 1, last_block.cols() - 1); r_R.UpperTriangular_Solve_FBS<CBlockMatrixTypelist>(&last_col(0), n); // all columns needed // calculates the whole last column of C r_marginals.row(n - 1).head(n - 1) = r_marginals.col(n - 1).head(n - 1).transpose(); // copy that also to the last row, to form the full matrix and not just upper-triangular } else*/ // the code above is simple, but the usolve is quite expensive { // columns with references to the subsequent columns size_t n_block_column_size; size_t n_block_column = L.n_Find_BlockColumn(i, n_block_column_size); size_t n_block_column_base = L.n_BlockColumn_Base(n_block_column); size_t n_block_column_block_num = L.n_BlockColumn_Block_Num(n_block_column); // gets the corresponding block col (can use decrementing strategy like in C_direct) { CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_column, 0); double f_L_ii = cur_L_diag_block(i - n_block_column_base, i - n_block_column_base); double f_L_ii_inv = 1 / f_L_ii; // get the diagonal element double f_diag_sum = 0; if(b_single_block_size) { // compile-time constant; should get optimized away for(size_t j = 1; j < n_block_column_block_num; ++ j) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef _block_i_j = L.t_Block_AtColumn(n_block_column, j); typename CUberBlockMatrix::CMakeMatrixRef<n_first_block_size, n_first_block_size>::_Ty block_i_j(_block_i_j.data()); size_t n_block_row_size = n_first_block_size;//block_i_j.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_column, j)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment<n_first_block_size>( n_block_row_base).dot(block_i_j.col(i - n_block_column_base)); // add dot of one column of the block with span of the current column of the marginals } } else { for(size_t j = 1; j < n_block_column_block_num; ++ j) { // all the blocks except the diagonal 
CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_Block_AtColumn(n_block_column, j); size_t n_block_row_size = block_i_j.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_column, j)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, i, 0, n_block_row_size) or something like that? n_block_row_size).dot(block_i_j.col(i - n_block_column_base)); // t_odo - _FBS it // add dot of one column of the block with span of the current column of the marginals } } { size_t n_block_row_size = n_block_column_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_column_base; // it is symmetric // look up the row in R (= column in L) size_t n_subblock_col = i - n_block_column_base; size_t n_first_underdiag_elem = n_subblock_col + 1; size_t n_underdiag_elem_num = n_block_column_size - n_first_underdiag_elem; f_diag_sum += r_marginals.row(i).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_subblock_col).tail( n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum //for(size_t j = i - n_block_column_base + 1; j < n_block_column_size; ++ j) // f_diag_sum += r_marginals(i, j + n_block_row_base) * cur_L_diag_block(j, i - n_block_column_base); // t_odo - make this a dot product on spans as well } #if 0 && defined(_DEBUG) double f_ref_diag_sum = 0; for(size_t j = i + 1; j < n; ++ j) { size_t n_block_row_size; size_t n_block_row = r_R.n_Find_BlockColumn(j, n_block_row_size); // R has symmetric layout size_t n_block_row_base = r_R.n_BlockColumn_Base(n_block_row); // look up the row in R (= column in L) CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_GetBlock_Log(n_block_row, n_block_column); if(!block_i_j.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks instead of looking up all blocks in dense manner 
double R_i_j = block_i_j(j - n_block_row_base, i - n_block_column_base); f_ref_diag_sum += r_marginals(i, j) * R_i_j; // this is bad, accesses the matrix by rows (need transpose) } _ASSERTE(fabs(f_ref_diag_sum - f_diag_sum) < std::max(1e-5, fabs(f_ref_diag_sum))); #endif // 0 && _DEBUG r_marginals(i, i) = f_L_ii_inv * (f_L_ii_inv - f_diag_sum); // calculate the diagonal element } for(size_t j = i; j > 0;) { // note that this is only required for the elements that are required on output or elements that are above nnz in the L or R factor -- j; // j is i in the book // i is k in the book size_t n_block_j_size; size_t n_block_j = L.n_Find_BlockColumn(j, n_block_j_size); size_t n_block_j_base = L.n_BlockColumn_Base(n_block_j); size_t n_block_j_block_num = L.n_BlockColumn_Block_Num(n_block_j); CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_j, 0); double f_L_jj = cur_L_diag_block(j - n_block_j_base, j - n_block_j_base); double f_L_jj_inv = 1 / f_L_jj; // get the diagonal block and element double f_diag_sum = 0; if(b_single_block_size) { // compile-time constant; should get optimized away for(size_t k = 1; k < n_block_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef _block_j_k = L.t_Block_AtColumn(n_block_j, k); typename CUberBlockMatrix::CMakeMatrixRef<n_first_block_size, n_first_block_size>::_Ty block_j_k(_block_j_k.data()); size_t n_block_row_size = n_first_block_size;//block_j_k.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_j, k)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment<n_first_block_size>( n_block_row_base).dot(block_j_k.col(j - n_block_j_base)); // add dot of one column of the block with span of the current column of the marginals } } else { for(size_t k = 1; k < n_block_j_block_num; ++ k) { // all the blocks except the diagonal CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_Block_AtColumn(n_block_j, k); size_t 
n_block_row_size = block_j_k.rows(); size_t n_block_row_base = L.n_BlockRow_Base(L.n_Block_Row(n_block_j, k)); // look up the row in R (= column in L) f_diag_sum += r_marginals.row(i).segment(n_block_row_base, // todo - try .block<1, Dynamic>(n_block_row_base, j, 0, n_block_row_size) or something like that? n_block_row_size).dot(block_j_k.col(j - n_block_j_base)); // t_odo - _FBS it // add dot of one column of the block with span of the current column of the marginals } } { size_t n_block_row_size = n_block_j_size;//cur_L_diag_block.rows(); // also symmetric size_t n_block_row_base = n_block_j_base; // it is symmetric // look up the row in R (= column in L) size_t n_subblock_col = j - n_block_j_base; size_t n_first_underdiag_elem = n_subblock_col + 1; size_t n_underdiag_elem_num = n_block_j_size - n_first_underdiag_elem; f_diag_sum += r_marginals.row(i).segment(n_first_underdiag_elem + n_block_row_base, n_underdiag_elem_num).dot(cur_L_diag_block.col(n_subblock_col).tail( n_block_row_size - n_first_underdiag_elem)); // todo - _FBS it // complete the sum } #if 0 && defined(_DEBUG) double f_sum_Rjk_Cik = 0;//, f_sum_part0, f_sum_first_elem; for(size_t k = j + 1; k < n; ++ k) { size_t n_block_k_size; size_t n_block_k = L.n_Find_BlockColumn(k, n_block_k_size); size_t n_block_k_base = L.n_BlockColumn_Base(n_block_k); CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_GetBlock_Log(n_block_k, n_block_j); if(!block_j_k.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks f_sum_Rjk_Cik += r_marginals(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // product } _ASSERTE(fabs(f_sum_Rjk_Cik - f_diag_sum) < std::max(1e-5, fabs(f_sum_Rjk_Cik))); // use the "old" code to verify corectness #endif // 0 && _DEBUG /*{ // debugging of more recurrent formula size_t m = i + 1; // wha? 
size_t n_block_m_size; size_t n_block_m = R.n_Find_BlockColumn(m, n_block_m_size); size_t n_block_m_base = R.n_BlockColumn_Base(n_block_m); CUberBlockMatrix::_TyMatrixXdRef diag_block_m = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_m) - 1, n_block_m); double f_R_mm = cur_L_diag_block(m - n_block_m_base, m - n_block_m_base); double f_sum_part1 = -(C_dep(m, m) * f_R_mm - 1 / f_R_mm); double f_sum_parted = f_sum_part0 + f_sum_part1; double f_sum_err = fabs(f_sum_parted - f_sum_Rjk_Cik); }*/ /*CUberBlockMatrix::_TyMatrixXdRef block_j_j = L.t_Block_AtColumn(n_block_j, L.n_BlockColumn_Block_Num(n_block_j) - 1); double L_j_j = block_j_j(j - n_block_j_base, j - n_block_j_base); // get another diagonal element of R*/ // unused r_marginals(i, j) = r_marginals(j, i) = -f_diag_sum * f_L_jj_inv; } // calculate C_dep_{n - 1 - i} by recurrent formula } } } /** * @brief naive recurrent right column band function that calculates dense marginals matrix * * @param[out] r_marginals is filled with the marginals matrix * @param[in] r_R is the Cholesky factor * @param[in] n_column_num is number of columns to calculate (in elements) * * @note This function throws std::bad_alloc. */ static void Calculate_DenseMarginals_LastNCols_Recurrent(Eigen::MatrixXd &r_marginals, const CUberBlockMatrix &r_R, size_t n_column_num) // throw(std::bad_alloc) { const size_t n = r_R.n_Column_Num(); // in elements r_marginals.resize(n, n); CUberBlockMatrix L; L.TransposeOf(r_R); // need transpose of R for(size_t i = n; i > 0;) { -- i; // here if(i == n - 1) { // the last row / col is easy, as it has no refs to the next ones Eigen::MatrixXd::ColXpr last_col = r_marginals.col(n - 1); size_t lb = r_R.n_BlockColumn_Num() - 1; CUberBlockMatrix::_TyConstMatrixXdRef last_block = r_R.t_Block_AtColumn(lb, r_R.n_BlockColumn_Block_Num(lb) - 1); last_col.setZero(); // !! 
last_col(n - 1) = 1 / last_block(last_block.rows() - 1, last_block.cols() - 1); r_R.UpperTriangular_Solve(&last_col(0), n); // all columns needed // calculates the whole last column of C r_marginals.row(n - 1).head(n - 1) = r_marginals.col(n - 1).head(n - 1).transpose(); // copy that also to the last row, to form the full matrix and not just upper-triangular } else { // columns with references to the subsequent columns //i = n - 1 - i; // fill the matrix from the back size_t n_block_column_size; size_t n_block_column = L.n_Find_BlockColumn(i, n_block_column_size); size_t n_block_column_base = L.n_BlockColumn_Base(n_block_column); // gets the corresponding block col (can use decrementing strategy like in C_direct) CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = L.t_Block_AtColumn(n_block_column, 0); double f_R_ii = cur_L_diag_block(i - n_block_column_base, i - n_block_column_base); double f_R_ii_inv = 1 / f_R_ii; // get the diagonal element double f_diag_sum = 0; for(size_t j = i + 1; j < n; ++ j) { size_t n_block_row_size; size_t n_block_row = L.n_Find_BlockColumn(j, n_block_row_size); // R has symmetric layout size_t n_block_row_base = L.n_BlockColumn_Base(n_block_row); // look up the row in R (= column in L) CUberBlockMatrix::_TyMatrixXdRef block_i_j = L.t_GetBlock_Log(n_block_row, n_block_column); if(!block_i_j.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks instead of looking up all blocks in dense manner double R_i_j = block_i_j(j - n_block_row_base, i - n_block_column_base); f_diag_sum += r_marginals(i, j) * R_i_j; // this is bad, accesses the matrix by rows (need transpose) } r_marginals(i, i) = f_R_ii_inv * (f_R_ii_inv - f_diag_sum); // calculate the diagonal element for(size_t j = i; j > 0;) { -- j; // j is i in the book // i is k in the book size_t n_block_j_size; size_t n_block_j = L.n_Find_BlockColumn(j, n_block_j_size); size_t n_block_j_base = L.n_BlockColumn_Base(n_block_j); double 
f_sum_Rjk_Cik = 0/*, f_sum_part0, f_sum_first_elem*/; for(size_t k = j + 1; k < n; ++ k) { //if(k == i + 1) // f_sum_part0 = f_sum_Rjk_Cik; // note that the second half of the sum might be actually recurrent and easy to recover from the previous diagonal elements // less code this way size_t n_block_k_size; size_t n_block_k = L.n_Find_BlockColumn(k, n_block_k_size); size_t n_block_k_base = L.n_BlockColumn_Base(n_block_k); CUberBlockMatrix::_TyMatrixXdRef block_j_k = L.t_GetBlock_Log(n_block_k, n_block_j); if(!block_j_k.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks f_sum_Rjk_Cik += r_marginals(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // product //if(k == i) // f_sum_first_elem = C_dep(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // save that as well } // note that this is a single loop, which skips iterating over element i /*{ // debugging of more recurrent formula size_t m = i + 1; // wha? size_t n_block_m_size; size_t n_block_m = R.n_Find_BlockColumn(m, n_block_m_size); size_t n_block_m_base = R.n_BlockColumn_Base(n_block_m); CUberBlockMatrix::_TyMatrixXdRef diag_block_m = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_m) - 1, n_block_m); double f_R_mm = cur_L_diag_block(m - n_block_m_base, m - n_block_m_base); double f_sum_part1 = -(C_dep(m, m) * f_R_mm - 1 / f_R_mm); double f_sum_parted = f_sum_part0 + f_sum_part1; double f_sum_err = fabs(f_sum_parted - f_sum_Rjk_Cik); }*/ CUberBlockMatrix::_TyMatrixXdRef block_j_j = L.t_Block_AtColumn(n_block_j, L.n_BlockColumn_Block_Num(n_block_j) - 1); double R_j_j = block_j_j(j - n_block_j_base, j - n_block_j_base); // get another diagonal element of R r_marginals(i, j) = r_marginals(j, i) = f_sum_Rjk_Cik / -R_j_j; } // calculate C_dep_{n - 1 - i} by recurrent formula //printf("%d, error: %g%6s\r", i, (C_dep.col(i) - C.col(i)).norm(), ""); //i = n - 1 - i; // go back to i } } } /** * @brief band diagonal function that calculates 
dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_diag_band_width is number of elements to calculate around the diagonal
 *
 *	@note Only the band of n_diag_band_width elements around the diagonal is written;
 *		the remaining entries of r_marginals are left uninitialized.
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DiagonalMarginals(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_diag_band_width = 3) // throw(std::bad_alloc)
{
	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n);
	// note that resize() does not zero the matrix; only the band around
	// the diagonal is filled below, the off-band entries stay uninitialized

	Eigen::MatrixXd prev_column_buffer(n, n_diag_band_width); // todo - cache align, will likely access the same elements in different vectors (needs to be implemented using a strided map)
	// previous columns
	// NOTE(review): this assumes n_diag_band_width >= the largest block (vertex) dimension,
	// otherwise the backlog is too short for the off-diagonal products below - TODO confirm
	// (the devel code contains the matching assert n_block_column_size <= n_diag_band_width)

	for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) {
		// n_block_col starts at size_t(-1) so that the first pre-increment wraps it to 0
		if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width
			n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col);
		// if done before, it avoids both referencing block col 0 in an empty matrix
		// and determining no. of columns in one past the last block column at the end

		Eigen::MatrixXd::ColXpr cur_row = prev_column_buffer.col(i % n_diag_band_width);
		cur_row.setZero();
		cur_row(i) = 1; // !!
		r_R.UpperTriangularTranspose_Solve(&cur_row(0), n, n_block_col);
		// forward substitution; can also skip to the current column
		// calculate a row of the R_inv? maybe? seems outright wrong, but actually gives a rather correct answer.

		_ASSERTE(cur_row.head(i).norm() == 0); // everything above i is zero (true)

		size_t n_block_org = r_R.n_BlockColumn_Base(n_block_col);
		r_marginals(i, i) = cur_row.tail(n - i).squaredNorm();
		//for(size_t j = std::max(n_diag_band_width, i) - n_diag_band_width; j < i; ++ j) { // could do two loops; one for i < n_diag_band_width and the one for the rest (low prio, almost doesn't matter)
		for(size_t j = i - n_block_org/*i % n_diag_band_width*/; j < i; ++ j) { // thin block diagonal, works even for mixed-size vertices
			Eigen::MatrixXd::ColXpr prev_row = prev_column_buffer.col(j % n_diag_band_width);
			r_marginals(j, i) = r_marginals(i, j) = prev_row.tail(n - j).dot(cur_row.tail(n - j));
		}
		// calculate banded diagonal; this works well, the banded diagonal is identical to the one in C or C_full
		// todo - store in a smaller matrix (will only contain block cross-covs of the vertices; that involves further ~50% reduction in computation)
	}
}

/**
 *	@brief parallel band diagonal function that calculates dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_diag_band_width is number of elements to calculate around the diagonal
 *
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DiagonalMarginals_Parallel(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_diag_band_width = 3) // throw(std::bad_alloc)
{
	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n);

#ifdef _OPENMP
	#pragma omp parallel
	{
		int n_tid = omp_get_thread_num();
		int n_thread_num = omp_get_num_threads();
		// each thread takes one contiguous band of columns; the last thread
		// also takes the remainder left over by the integer division
		size_t n_start = n_tid * (n / n_thread_num);
		size_t n_end = (n_tid + 1 < n_thread_num)? n_start + n / n_thread_num : n;
		// split to bands to be processed in parallel
		Calculate_DiagonalMarginals(r_marginals, r_R, n_start, n_end, n_diag_band_width); // process in parallel
		// NOTE(review): the band-width lead-in columns before each n_start are recomputed
		// (and written) by two adjacent threads; the values agree, but the concurrent
		// writes are technically a data race - TODO confirm this is intended
	}
#else // _OPENMP
	Calculate_DiagonalMarginals(r_marginals, r_R, n_diag_band_width);
#endif // _OPENMP
}

/**
 *	@brief helper to the parallel band diagonal function that calculates dense marginals matrix
 *
 *	@param[out] r_marginals is filled with the marginals matrix
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_start_column is zero-based index of the first column (in elements)
 *	@param[in] n_end_column is zero-based index of one past the last column (in elements)
 *	@param[in] n_diag_band_width is number of elements to calculate around the diagonal
 *
 *	@note This function throws std::bad_alloc.
 */
static void Calculate_DiagonalMarginals(Eigen::MatrixXd &r_marginals,
	const CUberBlockMatrix &r_R, size_t n_start_column, size_t n_end_column,
	size_t n_diag_band_width = 3) // throw(std::bad_alloc)
{
	_ASSERTE(n_start_column <= n_end_column); // should be a valid range

	const size_t n = r_R.n_Column_Num(); // in elements
	r_marginals.resize(n, n); // might work in a group, but then it is already allocated and it is a no-op

	Eigen::MatrixXd prev_column_buffer(n, n_diag_band_width); // todo - cache align, will likely access the same elements in different vectors (needs to be implemented using a strided map)
	// previous columns

	for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < std::min(n, n_end_column); ++ i) {
		if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width
			n_col_remains = r_R.n_BlockColumn_Column_Num(++ n_block_col);
		// if done before, it avoids both referencing block col 0 in an empty matrix
		// and determining no. of columns in one past the last block column at the end

		if(i + n_diag_band_width < n_start_column)
			continue; // todo - do a proper binary search for the column
		// columns further than the band width before n_start_column are skipped;
		// the n_diag_band_width columns just before it are still solved so that
		// prev_column_buffer holds the backlog needed for the dot products below

		Eigen::MatrixXd::ColXpr cur_row = prev_column_buffer.col(i % n_diag_band_width);
		cur_row.setZero();
		cur_row(i) = 1; // !!
		r_R.UpperTriangularTranspose_Solve(&cur_row(0), n, n_block_col);
		// forward substitution; can also skip to the current column
		// calculate a row of the R_inv? maybe? seems outright wrong, but actually gives a rather correct answer.

		_ASSERTE(cur_row.head(i).norm() == 0); // everything above i is zero (true)

		size_t n_block_org = r_R.n_BlockColumn_Base(n_block_col);
		r_marginals(i, i) = cur_row.tail(n - i).squaredNorm();
		//for(size_t j = std::max(n_diag_band_width, i) - n_diag_band_width; j < i; ++ j) { // could do two loops; one for i < n_diag_band_width and the one for the rest (low prio, almost doesn't matter)
		for(size_t j = i - n_block_org/*i % n_diag_band_width*/; j < i; ++ j) { // thin block diagonal, works even for mixed-size vertices
			Eigen::MatrixXd::ColXpr prev_row = prev_column_buffer.col(j % n_diag_band_width);
			r_marginals(j, i) = r_marginals(i, j) = prev_row.tail(n - j).dot(cur_row.tail(n - j));
		}
		// calculate banded diagonal; this works well, the banded diagonal is identical to the one in C or C_full
		// todo - store in a smaller matrix (will only contain block cross-covs of the vertices; that involves further ~50% reduction in computation)
	}
}

/**
 *	@brief benchmark of marginal calculation functions
 *
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_diag_band_width is band width for diagonal and column band functions
 *
 *	@note This function throws std::bad_alloc.
 */
static void Marginals_Test(const CUberBlockMatrix &r_R, size_t n_diag_band_width = 3) // throw(std::bad_alloc)
{
	// benchmarks the marginals implementations on r_R and prints timing
	// and precision (differences against the reference result) to stdout

	if(r_R.n_Column_Num() < n_diag_band_width) {
		fprintf(stderr, "error: matrix too small for banded tests (%d x %d): skipping\n",
			int(r_R.n_Row_Num()), int(r_R.n_Column_Num()));
		return;
	}
	// might not be handled correctly

	Eigen::MatrixXd C_ref, C_slow, C_fast, C_rec, C_diag, C_diag_para,
		C_rband_slow, C_rband_fast, C_rband_rec;
	CTimer t;
	double f_time_ref = 0, f_time_slow = 0, f_time_rec = 0, f_time_diag = 0,
		f_time_diag_para = 0, f_time_rbslow = 0, f_time_rbfast = 0, f_time_rbrec = 0,
		f_time_fast = 0, f_time_transpose = 0;
	size_t n_ref_pass_num = 0, n_slow_pass_num = 0, n_rec_pass_num = 0, n_diag_pass_num = 0,
		n_diag_para_pass_num = 0, n_rbslow_pass_num = 0, n_rbfast_pass_num = 0,
		n_rbrec_pass_num = 0, n_fast_pass_num = 0, n_transpose_pass_num = 0;
	// each timing loop below repeats the measured operation until at least 1 second
	// accumulated over at least 10 passes (or more than 4 seconds total), then averages

	for(;;) {
		double f_start = t.f_Time();
		{
			CUberBlockMatrix L;
			L.TransposeOf(r_R);
		}
		++ n_transpose_pass_num;
		f_time_transpose += t.f_Time() - f_start;
		if((f_time_transpose >= 1 && n_transpose_pass_num >= 10) || f_time_transpose > 4)
			break;
	}
	f_time_transpose /= n_transpose_pass_num;
	// time the transpose alone (it is reported below as part of the recurrent timing)

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Ref(C_ref, r_R);
		++ n_ref_pass_num;
		f_time_ref += t.f_Time() - f_start;
		if((f_time_ref >= 1 && n_ref_pass_num >= 10) || f_time_ref > 4)
			break;
	}
	f_time_ref /= n_ref_pass_num;

	/*for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Slow(C_slow, r_R);
		++ n_slow_pass_num;
		f_time_slow += t.f_Time() - f_start;
		if((f_time_slow >= 1 && n_slow_pass_num >= 10) || f_time_slow > 4)
			break;
	}
	f_time_slow /= n_slow_pass_num;*/

	std::vector<size_t> fake_perm(r_R.n_BlockColumn_Num());
	for(size_t i = 0, n = fake_perm.size(); i < n; ++ i)
		fake_perm[i] = i;
	// build a fake permutation on R (we don't have the original one here)

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Fast(C_fast, r_R, &fake_perm[0], fake_perm.size());
		++ n_fast_pass_num;
		f_time_fast += t.f_Time() - f_start;
		if((f_time_fast >= 1 && n_fast_pass_num >= 10) || f_time_fast > 4)
			break;
	}
	f_time_fast /= n_fast_pass_num;

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Recurrent(C_rec, r_R);
		//Calculate_DenseMarginals_Recurrent_Devel(C_rec, C_ref, r_R);
		++ n_rec_pass_num;
		f_time_rec += t.f_Time() - f_start;
		if((f_time_rec >= 1 && n_rec_pass_num >= 10) || f_time_rec > 4)
			break;
	}
	f_time_rec /= n_rec_pass_num;

	/*for(;;) {
		double f_start = t.f_Time();
		Calculate_DiagonalMarginals(C_diag, r_R, n_diag_band_width);
		++ n_diag_pass_num;
		f_time_diag += t.f_Time() - f_start;
		if((f_time_diag >= 1 && n_diag_pass_num >= 10) || f_time_diag > 4)
			break;
	}
	f_time_diag /= n_diag_pass_num;
	for(;;) {
		double f_start = t.f_Time();
		Calculate_DiagonalMarginals_Parallel(C_diag_para, r_R, n_diag_band_width);
		++ n_diag_para_pass_num;
		f_time_diag_para += t.f_Time() - f_start;
		if((f_time_diag_para >= 1 && n_diag_para_pass_num >= 10) || f_time_diag_para > 4)
			break;
	}
	f_time_diag_para /= n_diag_para_pass_num;
	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_LastNColumns_Slow(C_rband_slow, r_R, n_diag_band_width);
		++ n_rbslow_pass_num;
		f_time_rbslow += t.f_Time() - f_start;
		if((f_time_rbslow >= 1 && n_rbslow_pass_num >= 10) || f_time_rbslow > 4)
			break;
	}
	f_time_rbslow /= n_rbslow_pass_num;*/

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_LastNColumns_Fast(C_rband_fast, r_R,
			n_diag_band_width, &fake_perm[0], fake_perm.size());
		++ n_rbfast_pass_num;
		f_time_rbfast += t.f_Time() - f_start;
		if((f_time_rbfast >= 1 && n_rbfast_pass_num >= 10) || f_time_rbfast > 4)
			break;
	}
	f_time_rbfast /= n_rbfast_pass_num;

	/*for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_LastNCols_Recurrent(C_rband_rec, r_R, n_diag_band_width);
		++ n_rbrec_pass_num;
		f_time_rbrec += t.f_Time() - f_start;
		if((f_time_rbrec >= 1 && n_rbrec_pass_num >= 10) || f_time_rbrec > 4)
			break;
	}
	f_time_rbrec /= n_rbrec_pass_num;*/

	printf("%6s took %.3f msec\n", "ref", f_time_ref * 1000);
	//printf("%6s took %.3f msec\n", "slow", f_time_slow * 1000);
	printf("%6s took %.3f msec\n", "fast", f_time_fast * 1000);
	printf("%6s took %.3f msec\n", "rec", f_time_rec * 1000);
	printf("%6s took %.3f msec\n", "\ttranspose in rec", f_time_transpose * 1000);
	/*printf("%6s took %.3f msec\n", "diag", f_time_diag * 1000);
	printf("%6s took %.3f msec\n", "diag-p", f_time_diag_para * 1000);
	printf("%6s took %.3f msec\n", "rbslow", f_time_rbslow * 1000);
	printf("%6s took %.3f msec\n", "rbfast", f_time_rbfast * 1000);
	printf("%6s took %.3f msec\n", "rbrec", f_time_rbrec * 1000);*/
	// print times

	/*printf("norm of C_slow - C_ref is %g\n", (C_slow - C_ref).norm());*/
	printf("norm of C_fast - C_ref is %g\n", (C_fast - C_ref).norm());
	printf("norm of C_rec - C_ref is %g\n", (C_rec - C_ref).norm());
	/*printf("norm of the diagonal of C_diag - C_ref is %g\n",
		(C_diag.diagonal() - C_ref.diagonal()).norm());
	printf("norm of the diagonal of C_diag_para - C_ref is %g\n",
		(C_diag_para.diagonal() - C_ref.diagonal()).norm());
	printf("norm of the last N columns of C_rband_slow - C_ref is %g\n",
		(C_rband_slow.rightCols(n_diag_band_width) - C_ref.rightCols(n_diag_band_width)).norm());*/
	printf("norm of the last N columns of C_rband_fast - C_ref is %g\n",
		(C_rband_fast.rightCols(n_diag_band_width) - C_ref.rightCols(n_diag_band_width)).norm());
	/*printf("norm of the last N columns of C_rband_rec - C_ref is %g\n",
		(C_rband_rec.rightCols(n_diag_band_width) - C_ref.rightCols(n_diag_band_width)).norm());*/
	// print precision
}

/**
 *	@brief benchmark of marginal FBS calculation functions
 *
 *	@tparam CBlockMatrixTypelist is a list of possible matrix block sizes
 *
 *	@param[in] r_R is the Cholesky factor
 *	@param[in] n_diag_band_width is band width for diagonal and column band functions
 *
 *	@note This function throws std::bad_alloc.
 */
template <class CBlockMatrixTypelist>
static void Marginals_Test_FBS(const CUberBlockMatrix &r_R, size_t n_diag_band_width = 3) // throw(std::bad_alloc)
{
	// benchmarks the fixed-block-size (FBS) marginals implementations on r_R
	// and prints timing and precision (against the non-FBS reference) to stdout

	if(r_R.n_Column_Num() < n_diag_band_width) {
		fprintf(stderr, "error: matrix too small for banded tests (%d x %d): skipping\n",
			int(r_R.n_Row_Num()), int(r_R.n_Column_Num()));
		return;
	}
	// might not be handled correctly

	Eigen::MatrixXd C_ref, C_slow, C_fast, C_diag, C_diag_para,
		C_rband_slow, C_rband_fast, C_rband_rec;
	CUberBlockMatrix C_rec; // the recurrent FBS version yields a (possibly incomplete) sparse block matrix
	CTimer t;
	double f_time_ref = 0, f_time_slow = 0, f_time_rec = 0, f_time_diag = 0,
		f_time_diag_para = 0, f_time_rbslow = 0, f_time_rbfast = 0, f_time_rbrec = 0,
		f_time_fast = 0;
	size_t n_ref_pass_num = 0, n_slow_pass_num = 0, n_rec_pass_num = 0, n_diag_pass_num = 0,
		n_diag_para_pass_num = 0, n_rbslow_pass_num = 0, n_rbfast_pass_num = 0,
		n_rbrec_pass_num = 0, n_fast_pass_num = 0;
	// each timing loop repeats until at least 1 second over at least 10 passes
	// (or more than 4 seconds total), then averages

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Ref(C_ref, r_R);
		++ n_ref_pass_num;
		f_time_ref += t.f_Time() - f_start;
		if((f_time_ref >= 1 && n_ref_pass_num >= 10) || f_time_ref > 4)
			break;
	}
	f_time_ref /= n_ref_pass_num;

	/*for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Slow(C_slow, r_R);
		++ n_slow_pass_num;
		f_time_slow += t.f_Time() - f_start;
		if((f_time_slow >= 1 && n_slow_pass_num >= 10) || f_time_slow > 4)
			break;
	}
	f_time_slow /= n_slow_pass_num;*/

	std::vector<size_t> fake_perm(r_R.n_BlockColumn_Num());
	for(size_t i = 0, n = fake_perm.size(); i < n; ++ i)
		fake_perm[i] = i;
	// build a fake permutation on R (we don't have the original one here)

	CMatrixOrdering mord;
	mord.p_InvertOrdering(&fake_perm.front(), fake_perm.size());
	// the recurrent FBS version takes the ordering object rather than the raw permutation

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Fast_FBS<CBlockMatrixTypelist>(C_fast, r_R,
			&fake_perm[0], fake_perm.size());
		++ n_fast_pass_num;
		f_time_fast += t.f_Time() - f_start;
		if((f_time_fast >= 1 && n_fast_pass_num >= 10) || f_time_fast > 4)
			break;
	}
	f_time_fast /= n_fast_pass_num;

	for(;;) {
		/*#ifdef _DEBUG
		C_rec = C_ref; // insert ground truth
#endif // _DEBUG*/
		double f_start = t.f_Time();
		Calculate_DenseMarginals_Recurrent_FBS<CBlockMatrixTypelist>(C_rec, r_R, mord, mpart_Diagonal);
		++ n_rec_pass_num;
		f_time_rec += t.f_Time() - f_start;
		if((f_time_rec >= 1 && n_rec_pass_num >= 10) || f_time_rec > 4)
			break;
	}
	f_time_rec /= n_rec_pass_num;

	/*for(;;) {
		double f_start = t.f_Time();
		Calculate_DiagonalMarginals(C_diag, r_R, n_diag_band_width);
		++ n_diag_pass_num;
		f_time_diag += t.f_Time() - f_start;
		if((f_time_diag >= 1 && n_diag_pass_num >= 10) || f_time_diag > 4)
			break;
	}
	f_time_diag /= n_diag_pass_num;
	for(;;) {
		double f_start = t.f_Time();
		Calculate_DiagonalMarginals_Parallel(C_diag_para, r_R, n_diag_band_width);
		++ n_diag_para_pass_num;
		f_time_diag_para += t.f_Time() - f_start;
		if((f_time_diag_para >= 1 && n_diag_para_pass_num >= 10) || f_time_diag_para > 4)
			break;
	}
	f_time_diag_para /= n_diag_para_pass_num;
	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_LastNColumns_Slow(C_rband_slow, r_R, n_diag_band_width);
		++ n_rbslow_pass_num;
		f_time_rbslow += t.f_Time() - f_start;
		if((f_time_rbslow >= 1 && n_rbslow_pass_num >= 10) || f_time_rbslow > 4)
			break;
	}
	f_time_rbslow /= n_rbslow_pass_num;*/

	for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_LastNColumns_Fast_FBS<CBlockMatrixTypelist>(C_rband_fast, r_R,
			n_diag_band_width, &fake_perm[0], fake_perm.size());
		++ n_rbfast_pass_num;
		f_time_rbfast += t.f_Time() - f_start;
		if((f_time_rbfast >= 1 && n_rbfast_pass_num >= 10) || f_time_rbfast > 4)
			break;
	}
	f_time_rbfast /= n_rbfast_pass_num;

	/*for(;;) {
		double f_start = t.f_Time();
		Calculate_DenseMarginals_LastNCols_Recurrent(C_rband_rec, r_R, n_diag_band_width);
		++ n_rbrec_pass_num;
		f_time_rbrec += t.f_Time() - f_start;
		if((f_time_rbrec >= 1 && n_rbrec_pass_num >= 10) || f_time_rbrec > 4)
			break;
	}
	f_time_rbrec /= n_rbrec_pass_num;*/

	printf("%10s took %.3f msec\n", "ref", f_time_ref * 1000);
	printf("%10s took %.3f msec (calculated " PRIsize " nnz)\n",
		"rec_FBS", f_time_rec * 1000, C_rec.n_NonZero_Num());
	printf("%10s took %.3f msec\n", "fast_FBS", f_time_fast * 1000);
	printf("%10s took %.3f msec\n", "rbfast_FBS", f_time_rbfast * 1000);
	// print times

	/*printf("norm of C_slow - C_ref is %g\n", (C_slow - C_ref).norm());*/
	printf("norm of C_fast_FBS - C_ref is %g\n", (C_fast - C_ref).norm());
	printf("norm of C_rec_FBS - C_ref is %g\n", f_IncompleteDifference(C_ref, C_rec));
	//printf("norm of the diagonal of C_rec_FBS - C_ref is %g\n", (C_rec.diagonal() - C_ref.diagonal()).norm());
	/*printf("norm of the diagonal of C_diag - C_ref is %g\n",
		(C_diag.diagonal() - C_ref.diagonal()).norm());
	printf("norm of the diagonal of C_diag_para - C_ref is %g\n",
		(C_diag_para.diagonal() - C_ref.diagonal()).norm());
	printf("norm of the last N columns of C_rband_slow - C_ref is %g\n",
		(C_rband_slow.rightCols(n_diag_band_width) - C_ref.rightCols(n_diag_band_width)).norm());*/
	printf("norm of the last N columns of C_rband_fast_FBS - C_ref is %g\n",
		(C_rband_fast.rightCols(n_diag_band_width) - C_ref.rightCols(n_diag_band_width)).norm());
	/*printf("norm of the last N columns of C_rband_rec - C_ref is %g\n",
		(C_rband_rec.rightCols(n_diag_band_width) - C_ref.rightCols(n_diag_band_width)).norm());*/
	// print precision
}

/**
 *	@brief calculates norm of difference between dense ground truth and sparse (incomplete) solution
 *
 *	@param[in] r_matrix is full matrix containing the marginals
 *	@param[in] r_incomplete is partially calculated sparse block matrix with the same marginals
 *
 *	@return Returns norm of differences of the two matrices, ignoring the parts of the
 *		dense matrix where the sparse block matrix doesn't have values.
*/ static double f_IncompleteDifference(const Eigen::MatrixXd &r_matrix, const CUberBlockMatrix &r_incomplete) { _ASSERTE(r_matrix.rows() == r_incomplete.n_Row_Num() && r_matrix.cols() == r_incomplete.n_Column_Num()); // should be same size double f_error = 0; for(size_t i = 0, n = r_incomplete.n_BlockColumn_Num(); i < n; ++ i) { size_t n_column_base = r_incomplete.n_BlockColumn_Base(i); for(size_t j = 0, m = r_incomplete.n_BlockColumn_Block_Num(i); j < m; ++ j) { size_t n_block_row = r_incomplete.n_Block_Row(i, j); size_t n_row_base = r_incomplete.n_BlockRow_Base(n_block_row); CUberBlockMatrix::_TyConstMatrixXdRef t_block = r_incomplete.t_Block_AtColumn(i, j); f_error += (r_matrix.block(n_row_base, n_column_base, t_block.rows(), t_block.cols()) - t_block).squaredNorm(); } } // sum up error under blocks that were evaluated return sqrt(f_error); } /** * @brief calculates norm of difference between two sparse (incomplete) solutions * * @param[in] r_incomplete_a is partially calculated sparse block matrix containing the marginals * @param[in] r_incomplete_b is partially calculated sparse block matrix with the same marginals * * @return Returns norm of differences of the two matrices, ignoting the parts of the * dense matrix where the sparse block matrix doesn't have values. 
 */
static double f_IncompleteDifference(size_t &r_n_common_block_num,
	const CUberBlockMatrix &r_incomplete_a, const CUberBlockMatrix &r_incomplete_b)
{
	_ASSERTE(r_incomplete_a.n_Row_Num() == r_incomplete_b.n_Row_Num() &&
		r_incomplete_a.n_Column_Num() == r_incomplete_b.n_Column_Num());
	_ASSERTE(r_incomplete_a.b_EqualLayout(r_incomplete_b));
	// should be same size and the same layout

	r_n_common_block_num = 0;
	double f_error = 0;
	for(size_t i = 0, n = r_incomplete_b.n_BlockColumn_Num(); i < n; ++ i) {
		// merge-join over the block lists of column i of both matrices;
		// blocks present in only one of the matrices are skipped (and do not
		// contribute to the error or to the common block count)
		for(size_t j = 0, m = r_incomplete_a.n_BlockColumn_Block_Num(i),
		   k = 0, o = r_incomplete_b.n_BlockColumn_Block_Num(i); j < m && k < o;) {
			size_t n_block_row_j = r_incomplete_a.n_Block_Row(i, j),
				n_block_row_k = r_incomplete_b.n_Block_Row(i, k);
			if(n_block_row_j == n_block_row_k) {
				CUberBlockMatrix::_TyConstMatrixXdRef t_block_j = r_incomplete_a.t_Block_AtColumn(i, j),
					t_block_k = r_incomplete_b.t_Block_AtColumn(i, k);
				f_error += (t_block_j - t_block_k).squaredNorm();
				++ r_n_common_block_num;
				++ j;
				++ k;
			} else if(n_block_row_j < n_block_row_k)
				++ j;
			else
				++ k;
			// advance the side with the lesser block row
			// (assumes the block lists are sorted by block row)
		}
	}
	// sum up error under blocks that were evaluated

	return sqrt(f_error);
}

#if 0 // disabled development / scratch code, kept for reference

void Marginals_DevelCode()
{
	const CUberBlockMatrix &lambda = solver.r_Lambda(); // get system matrix
	cs *p_lam = lambda.p_Convert_to_Sparse();
	FILE *p_fw = fopen("lambda.txt", "w");
	CDebug::Print_SparseMatrix_in_MatlabFormat(p_fw, p_lam, "lambda = ", "' % this is supposed to be upper-tri\n");
	fclose(p_fw);
	cs_spfree(p_lam);
	// dump the optimized lambda

	CUberBlockMatrix R;
	R.CholeskyOf(lambda); // take Cholesky

	const size_t n = R.n_Column_Num(); // in elements
	Eigen::MatrixXd R_inv(n, n); // R_inv = S
	for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) {
		double *p_column = &R_inv.col(i)(0);
		memset(p_column, 0, n * sizeof(double));
		p_column[i] = 1;
		if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width
			n_col_remains = R.n_BlockColumn_Column_Num(++ n_block_col);
R.UpperTriangular_Solve(p_column, n, n_block_col); // backsub, only the nonzero part of the column (started at (block) column which contains column i, with no loss of generality) } Eigen::MatrixXd C = R_inv.lazyProduct(R_inv.transpose()); // C = SS^T // calculate the covariance (assume that this is correct) Eigen::MatrixXd C_full(n, n), C_direct(n, n), C_dep(n, n); C_full.setZero(); C_direct.setZero(); C_dep.setZero(); // only the diagonal of C size_t n_diag_band_width = 3; // = max column width occuring in the matrix (max vertex dim) // size of the diagonal (also the size of the buffer for the past rows of inv(R)) Eigen::MatrixXd prev_column_buffer(n, n_diag_band_width); // todo - cache align, will likely access the same elements in different vectors // previous columns CUberBlockMatrix R_tr; R.TransposeTo(R_tr); // need transpose of R //Eigen::MatrixXd R_inv(n, n); // R_inv = S for(size_t i = 0, n_block_col = -1, n_col_remains = 1; i < n; ++ i) { double *p_column = &R_inv.col(i)(0); // get dense column data from the Eigen matrix (should be packed) memset(p_column, 0, n * sizeof(double)); p_column[i] = 1; // make a column vector with a single 1 in it if(!(-- n_col_remains)) // triggers in the first iteration, loads up column width n_col_remains = R.n_BlockColumn_Column_Num(++ n_block_col); // if done before, it avoids both referencing block col 0 in an empty matrix // and determining no. 
of columns in one past the last block column at the end size_t UNUSED(n_block_column_size); size_t n_block_column = R.n_Find_BlockColumn(i, n_block_column_size); _ASSERTE(n_block_col == n_block_column); // should be the same // get which block column contains column i (optimize this away, probably need to use it when resuming) _ASSERTE(n_block_column_size <= n_diag_band_width); // make this into a run-time check in the production code // make sure it is not longer than the diagonal (otherwise we will not have enough backlog to calculate all the off-diagonal elements) R.UpperTriangular_Solve(p_column, n, n_block_col); // backsub, only the nonzero part of the column (started at (block) column which contains column i, with no loss of generality) // this seems to be O(i) divisions + O(nnz) MADs in the given (block) column range // that sums up to O(n^2/2) divisions + O(nnz log(nnz))?? MADs ... some quadratute anyways std::vector<double> backsub_test(n, 0); backsub_test[i] = 1; // !! R.UpperTriangular_Solve(&backsub_test[0], n); // full backsub _ASSERTE(!memcmp(p_column, &backsub_test[0], n * sizeof(double))); // make sure that the result is correct _ASSERTE((Eigen::Map<Eigen::VectorXd, Eigen::Unaligned>(p_column + i + 1, n - i - 1).norm() == 0)); // double pars required because of the comma in Map params // everything below i is zero (true) for(size_t k = 0; k <= i; ++ k) { for(size_t j = 0; j <= i; ++ j) C_full(j, k) += p_column[j] * p_column[k]; // it is symmetric, indexing arbitrary } // accumulate the entries of the covariace matrix. this is O(n^3/2) MADs for the full matrix // note that to calculate even only the diagonal, we need full columns Eigen::MatrixXd::ColXpr cur_row = prev_column_buffer.col(i % n_diag_band_width); cur_row.setZero(); cur_row(i) = 1; // !! R.UpperTriangularTranspose_Solve(&cur_row(0), n, n_block_col); // forward substitution; can also skip to the current column // calculate a row of the R_inv? maybe? 
seems outright wrong, but actually gives a rather correct answer. _ASSERTE(cur_row.head(i).norm() == 0); // everything above i is zero (true) size_t n_block_org = R.n_BlockColumn_Base(n_block_col); C_direct(i, i) = cur_row.tail(n - i).squaredNorm(); //for(size_t j = std::max(n_diag_band_width, i) - n_diag_band_width; j < i; ++ j) { // could do two loops; one for i < n_diag_band_width and the one for the rest (low prio, almost doesn't matter) for(size_t j = i - n_block_org/*i % n_diag_band_width*/; j < i; ++ j) { // thin block diagonal, works even for mixed-size vertices Eigen::MatrixXd::ColXpr prev_row = prev_column_buffer.col(j % n_diag_band_width); C_direct(j, i) = C_direct(i, j) = prev_row.tail(n - j).dot(cur_row.tail(n - j)); } // calculate banded diagonal; this works well, the banded diagonal is identical to the one in C or C_full // todo - store in a smaller matrix (will only contain block cross-covs of the vertices; that involves further ~50% reduction in computation) //C_direct(i, i) = 0; //for(size_t j = i; j < n; ++ j) // C_direct(i, i) += p_column[j] * p_column[j]; // O(n^2/2) for the diagonal of the full matrix // the diagonal is just the stuff squared. 
to calculate off-diagonals, we need to cache previous columns as well // also the columns are independent and there is no communication involved; this is easily parallelised, even on GPU if(!i) { // the last row / col is easy, as it has no refs to the next ones Eigen::MatrixXd::ColXpr last_col = C_dep.col(n - 1); size_t lb = R.n_BlockColumn_Num() - 1; CUberBlockMatrix::_TyMatrixXdRef last_block = R.t_BlockAt(R.n_BlockColumn_Block_Num(lb) - 1, lb); last_col(n - 1) = 1 / last_block(last_block.rows() - 1, last_block.cols() - 1); R.UpperTriangular_Solve(&last_col(0), n); // all columns needed // calculates the whole last column of C C_dep.row(n - 1).head(n - 1) = C_dep.col(n - 1).head(n - 1).transpose(); // copy that also to the last row, to form the full matrix and not just upper-triangular } else { // columns with references to the subsequent columns i = n - 1 - i; // fill the matrix from the back size_t n_block_column_size; size_t n_block_column = R.n_Find_BlockColumn(i, n_block_column_size); size_t n_block_column_base = R.n_BlockColumn_Base(n_block_column); // gets the corresponding block col (can use decrementing strategy like in C_direct) CUberBlockMatrix::_TyMatrixXdRef cur_L_diag_block = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_column) - 1, n_block_column); double f_R_ii = cur_L_diag_block(i - n_block_column_base, i - n_block_column_base); double f_R_ii_inv = 1 / f_R_ii; // get the diagonal element double f_diag_sum = 0; for(size_t j = i + 1; j < n; ++ j) { size_t n_block_row_size; size_t n_block_row = R.n_Find_BlockColumn(j, n_block_row_size); // R has symmetric layout size_t n_block_row_base = R.n_BlockColumn_Base(n_block_row); // look up the row in R (= column in R_tr) CUberBlockMatrix::_TyMatrixXdRef block_i_j = R_tr.t_GetBlock_Log(n_block_row, n_block_column); if(!block_i_j.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks instead of looking up all blocks in dense manner double R_i_j = 
block_i_j(j - n_block_row_base, i - n_block_column_base); f_diag_sum += C_dep(i, j) * R_i_j; // this is bad, accesses the matrix by rows (need transpose) } C_dep(i, i) = f_R_ii_inv * (f_R_ii_inv - f_diag_sum); for(size_t j = i; j > 0;) { -- j; // j is i in the book // i is k in the book size_t n_block_j_size; size_t n_block_j = R.n_Find_BlockColumn(j, n_block_j_size); size_t n_block_j_base = R.n_BlockColumn_Base(n_block_j); double f_sum_Rjk_Cik = 0/*, f_sum_part0, f_sum_first_elem*/; for(size_t k = j + 1; k < n; ++ k) { //if(k == i + 1) // f_sum_part0 = f_sum_Rjk_Cik; // note that the second half of the sum might be actually recurrent and easy to recover from the previous diagonal elements // less code this way size_t n_block_k_size; size_t n_block_k = R.n_Find_BlockColumn(k, n_block_k_size); size_t n_block_k_base = R.n_BlockColumn_Base(n_block_k); CUberBlockMatrix::_TyMatrixXdRef block_j_k = R_tr.t_GetBlock_Log(n_block_k, n_block_j); if(!block_j_k.cols()) continue; // no such block (zero, sparse) // todo - rewrite to form of a loop over *existing* blocks f_sum_Rjk_Cik += C_dep(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // product //if(k == i) // f_sum_first_elem = C_dep(i, k) * block_j_k(k - n_block_k_base, j - n_block_j_base); // save that as well } // note that this is a single loop, which skips iterating over element i /*{ // debugging of more recurrent formula size_t m = i + 1; // wha? 
size_t n_block_m_size; size_t n_block_m = R.n_Find_BlockColumn(m, n_block_m_size); size_t n_block_m_base = R.n_BlockColumn_Base(n_block_m); CUberBlockMatrix::_TyMatrixXdRef diag_block_m = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_m) - 1, n_block_m); double f_R_mm = cur_L_diag_block(m - n_block_m_base, m - n_block_m_base); double f_sum_part1 = -(C_dep(m, m) * f_R_mm - 1 / f_R_mm); double f_sum_parted = f_sum_part0 + f_sum_part1; double f_sum_err = fabs(f_sum_parted - f_sum_Rjk_Cik); }*/ CUberBlockMatrix::_TyMatrixXdRef block_j_j = R.t_BlockAt(R.n_BlockColumn_Block_Num(n_block_j) - 1, n_block_j); double R_j_j = block_j_j(j - n_block_j_base, j - n_block_j_base); // get another diagonal element of R C_dep(i, j) = C_dep(j, i) = f_sum_Rjk_Cik / -R_j_j; } // calculate C_dep_{n - 1 - i} by recurrent formula printf("%d, error: %g%6s\r", i, (C_dep.col(i) - C.col(i)).norm(), ""); i = n - 1 - i; // go back to i } } // calculate inverse of R using backsubstitution printf("norm of C - C_full is %g\n", (C_full - C).norm()); printf("norm of the diagonal of C - C_direct is %g\n", (C_direct.diagonal() - C.diagonal()).norm()); printf("norm of C - C_dep is %g\n", (C_dep - C).norm()); printf("norm of the last column of C - C_dep is %g\n", (C_dep.col(n - 1) - C.col(n - 1)).norm()); printf("norm of the diagonal of C - C_dep is %g\n", (C_dep.diagonal() - C.diagonal()).norm()); p_fw = fopen("R_inv.txt", "w"); CDebug::Print_DenseMatrix_in_MatlabFormat(p_fw, R_inv, "R_inv = ", "\n"); fclose(p_fw); p_fw = fopen("C.txt", "w"); CDebug::Print_DenseMatrix_in_MatlabFormat(p_fw, C, "C = ", "\n"); fclose(p_fw); p_fw = fopen("C_banded.txt", "w"); CDebug::Print_DenseMatrix_in_MatlabFormat(p_fw, C_direct, "C_banded = ", "\n"); fclose(p_fw); // save for comparison with matlab formulas } #endif // 0 /** * @brief the fixed-block-size kernel for incremental update of sparse block marginals */ class CMarginalsUpdate_FBSKernel { public: /** * @brief sums vertex dimensions in a binary edge (affects the 
size of the omega matrix)
	 *	@tparam CEdgeType is an edge type name
	 */
	template <class CEdgeType>
	class CEdgeTypeToSumOfVertexDims {
	public:
		/**
		 *	@brief result stored in an enum
		 */
		enum {
			n_edge_rank = CEdgeType::n_vertex_num, // handles edges of rank 1 and 2, higher ranks result in compile-time error
			n_result = CEdgeType::template CVertexTraits<0>::n_dimension +
				((n_edge_rank == 1)? 0 : 1) * CEdgeType::template CVertexTraits<(n_edge_rank == 2)?
				1 : (n_edge_rank == 1)? 0 : -1>::n_dimension /**< @brief sum of vertex dimensions */
			// rank 1 counts vertex 0 twice and shorts it by multiplying by zero
			// higher ranks cause compile-time error by accessing vertex -1
		};

		typedef fbs_ut::CCTSize<n_result> _TyResult; /**< @brief sum of vertex dimensions as CCTSize specialization */
	};

	/**
	 *	@brief strips a pointer or a reference from a type
	 *	@tparam CClass is input type
	 */
	template <class CClass>
	class CExtractTypename {
	public:
		typedef CClass _TyResult; /**< @brief resulting type with a single pointer / reference stripped */
	};

	/**
	 *	@brief strips a pointer or a reference from a type (specialization for pointer types)
	 *	@tparam CClass is input type
	 */
	template <class CClass>
	class CExtractTypename<CClass*> {
	public:
		typedef CClass _TyResult; /**< @brief resulting type with a single pointer / reference stripped */
	};

	/**
	 *	@brief strips a pointer or a reference from a type (specialization for reference types)
	 *	@tparam CClass is input type
	 */
	template <class CClass>
	class CExtractTypename<CClass&> {
	public:
		typedef CClass _TyResult; /**< @brief resulting type with a single pointer / reference stripped */
	};

	/**
	 *	@brief outer loop context
	 *	@tparam CTimerSamplerRefType is type of timer sampler (CVoidTimerSampler or CTimerSampler), or a reference to it
	 */
	template <class CTimerSamplerRefType> // timing or not timing
	struct TOuterContext {
		typedef typename CExtractTypename<CTimerSamplerRefType>::_TyResult _TyTimerSampler; /**< @brief type of timer sampler (CVoidTimerSampler or CTimerSampler) */

		bool &r_b_result; /**< @brief reference to calculation result flag; cleared upon numerical failure */
		CTimerSamplerRefType r_t_timer_sampler; /**< @brief timer sampler for profiling */ // not a reference, that type is either a reference or void
		const CUberBlockMatrix &r_omega_slim; /**< @brief omega matrix permuted to have no empty rows / columns */
		const std::vector<size_t> &r_required_column_list; /**< @brief the list of columns of Sigma required for the update */
		// intermediate

		CUberBlockMatrix &r_prev_marginals; /**< @brief previous marginals, will be updated inplace to current marginals */
		const CUberBlockMatrix &r_lambda_in_natural_order; /**< @brief reference to the lambda matrix (must be in natural order) */
		const CUberBlockMatrix &r_R; /**< @brief reference to ordered Cholesky factorization of r_lambda_in_natural_order */
		const CMatrixOrdering &r_mord; /**< @brief reference to the ordering, used in the Cholesky factorization of r_R */
		bool b_update_diag_only; /**< @brief diagonal update flag (if set, only the diagonal is updated) */
		// parameters

		/**
		 *	@brief default constructor
		 *
		 *	@param[in] _r_b_result is reference to calculation result flag; cleared upon numerical failure
		 *	@param[in] _r_t_timer_sampler is timer sampler for profiling
		 *	@param[in] _r_omega_slim is omega matrix permuted to have no empty rows / columns
		 *	@param[in] _r_required_column_list is the list of columns of Sigma required for the update
		 *	@param[in] _r_prev_marginals is previous marginals, will be updated inplace to current marginals
		 *	@param[in] _r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order)
		 *	@param[in] _r_R is reference to ordered Cholesky factorization of _r_lambda_in_natural_order
		 *	@param[in] _r_mord is reference to the ordering, used in the Cholesky factorization of _r_R
		 *	@param[in] _b_update_diag_only is diagonal update flag (if set, only the diagonal is updated)
		 *
		 *	@note NOTE(review): _r_t_timer_sampler is passed as the stripped (by-value) type; if
		 *		CTimerSamplerRefType is a reference type, the r_t_timer_sampler member would bind
		 *		to this by-value parameter and dangle after construction - TODO confirm callers
		 *		only instantiate this with non-reference (or void-like) sampler types.
		 */
		inline TOuterContext(bool &_r_b_result, _TyTimerSampler _r_t_timer_sampler,
			const CUberBlockMatrix &_r_omega_slim, const std::vector<size_t> &_r_required_column_list,
			CUberBlockMatrix &_r_prev_marginals, const CUberBlockMatrix &_r_lambda_in_natural_order,
			const CUberBlockMatrix &_r_R, const CMatrixOrdering &_r_mord, bool _b_update_diag_only)
			:r_b_result(_r_b_result), r_t_timer_sampler(_r_t_timer_sampler),
			r_omega_slim(_r_omega_slim), r_required_column_list(_r_required_column_list),
			r_prev_marginals(_r_prev_marginals), r_lambda_in_natural_order(_r_lambda_in_natural_order),
			r_R(_r_R), r_mord(_r_mord), b_update_diag_only(_b_update_diag_only)
		{}
	};

	/**
	 *	@brief middle loop context
	 */
	struct TMiddleContext {
		CUberBlockMatrix &r_prev_marginals; /**< @brief previous marginals, will be updated inplace to current marginals */
		size_t i; /**< @brief outer loop counter */
		size_t n_cur_state_size; /**< @brief current state size, in elements */
		size_t n_prev_state_size; /**< @brief previous marginals size, in elements */
		const double *p_Tu; /**< @brief pointer to the Tu matrix (aligned for SSE where applicable) */
		const double *p_Bu; /**< @brief pointer to the Bu matrix (aligned for SSE where applicable) */

		/**
		 *	@brief default constructor
		 *
		 *	@param[in] _r_prev_marginals is reference to the previous marginals, will be updated
		 *		inplace to current marginals
		 *	@param[in] _i is outer loop counter
		 *	@param[in] _n_cur_state_size is current state size, in elements
		 *	@param[in] _n_prev_state_size is previous marginals size, in elements
		 *	@param[in] _p_Tu is pointer to the Tu matrix (aligned for SSE where applicable)
		 *	@param[in] _p_Bu is pointer to the Bu matrix (aligned for SSE where applicable)
		 */
		inline TMiddleContext(CUberBlockMatrix &_r_prev_marginals, size_t _i,
			size_t _n_cur_state_size, size_t _n_prev_state_size, const double *_p_Tu, const double *_p_Bu)
			:r_prev_marginals(_r_prev_marginals), i(_i), n_cur_state_size(_n_cur_state_size),
			n_prev_state_size(_n_prev_state_size), p_Tu(_p_Tu), p_Bu(_p_Bu)
		{}
	};

	/**
	 *	@brief
inner loop context */
struct TInnerContext : public TMiddleContext {
	size_t j; /**< @brief middle loop counter */
	size_t n_row_base; /**< @brief leading row of the block being updated (in the previous marginals matrix) */
	size_t n_col_base; /**< @brief leading column of the block being updated (in the previous marginals matrix) */

	/**
	 *	@brief default constructor
	 *
	 *	@param[in] t_ctx is middle loop context
	 *	@param[in] _j is middle loop counter
	 *	@param[in] _n_row_base is leading row of the block being updated (in the previous marginals matrix)
	 *	@param[in] _n_col_base is leading column of the block being updated (in the previous marginals matrix)
	 */
	inline TInnerContext(TMiddleContext t_ctx, size_t _j, size_t _n_row_base, size_t _n_col_base)
		:TMiddleContext(t_ctx), j(_j), n_row_base(_n_row_base), n_col_base(_n_col_base)
	{}
};

/**
 *	@brief the inner loop; updates a single block of the marginals
 *
 *	@tparam n_rows is number of rows of the current block, in elements
 *	@tparam COmegaElems_Cols is n_omega_elems and number of columns of the
 *		current block (both in elements), packed as CCTSize2D; in case number
 *		of columns is -1, it is set the same as the number of rows
 */
template <const int n_rows, class COmegaElems_Cols>
struct TInnerLoop {
	/**
	 *	@brief loop body implementation
	 *	@param[in] t_ctx is loop context
	 */
	static inline void Do(TInnerContext t_ctx)
	{
		enum {
			n_omega_elems = COmegaElems_Cols::n_row_num,
			n_cols = (COmegaElems_Cols::n_column_num == -1)? n_rows :
				COmegaElems_Cols::n_column_num // using this for both diagonal : off-diagonal blocks
		};

		CUberBlockMatrix &r_prev_marginals = t_ctx.r_prev_marginals;
		const size_t i = t_ctx.i;
		const size_t j = t_ctx.j;
		const size_t n_row_base = t_ctx.n_row_base;
		const size_t n_col_base = t_ctx.n_col_base;
		//const size_t n_cur_state_size = t_ctx.n_cur_state_size;
		const size_t n_prev_state_size = t_ctx.n_prev_state_size;
		//const Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, n_omega_elems> >
		//	Tu_full(t_ctx.p_Tu, n_cur_state_size, n_omega_elems);
		enum { n_omega_stride = n_Align_Up_POT_Static(n_omega_elems, 8) };
		// NOTE(review): the stride must match the one used where p_Tu / p_Bu are
		// allocated (TOuterLoop::Do() uses the same n_Align_Up_POT_Static(n_omega_elems, 8))
		const Eigen::Map<const Eigen::Matrix<double, n_omega_elems, Eigen::Dynamic>,
			Eigen::Aligned, Eigen::OuterStride<n_omega_stride> > TuT(t_ctx.p_Tu,
			n_omega_elems, n_prev_state_size);
		const Eigen::Map<const Eigen::Matrix<double, n_omega_elems, Eigen::Dynamic>,
			Eigen::Aligned, Eigen::OuterStride<n_omega_stride> > Bu(t_ctx.p_Bu,
			n_omega_elems, n_prev_state_size);
		// unwrap the contexts

		//r_prev_marginals.t_Block_AtColumn<n_rows, n_cols>(i, j) -=
		//	Tu_full.template middleRows<n_rows>(n_row_base).lazyProduct(Bu.template middleCols<n_cols>(n_col_base));
		// or use .block(), not sure how this infers the rest of the dimensions (it could, though)

		/*r_prev_marginals.t_Block_AtColumn<n_rows, n_cols>(i, j) -=
			Tu_full.template block<n_rows, n_omega_elems>(n_row_base, 0).lazyProduct(
			Bu.template block<n_omega_elems, n_cols>(0, n_col_base));
		// more of the same with blocks (seems to work, though)*/

		r_prev_marginals.t_Block_AtColumn<n_rows, n_cols>(i, j) -=
			TuT.template block<n_omega_elems, n_rows>(0, n_row_base).transpose().lazyProduct(
			Bu.template block<n_omega_elems, n_cols>(0, n_col_base));
		// if we transpose Tu, the locality of reference increases a lot
	}
};

/**
 *	@brief the middle loop; just visits every block in the given column
 *		and calls the inner loop to calculate update
 *
 *	@tparam n_cols is size of the current block column, in elements
 *	@tparam
COmegaElems_HessianMatrixBlockList is a typelist, containing
 *		value of n_omega_elems as CCTSize and list of hessian block sizes
 */
template <const int n_cols, class COmegaElems_HessianMatrixBlockList>
struct TMiddleLoop {
	/**
	 *	@brief loop body implementation
	 *	@param[in] t_ctx is loop context
	 */
	static inline void Do(TMiddleContext t_ctx)
	{
		enum { n_omega_elems = COmegaElems_HessianMatrixBlockList::_TyHead::n_size };
		typedef typename COmegaElems_HessianMatrixBlockList::_TyTail::_TyHead _TyHessianMatrixBlockList;
		const CUberBlockMatrix &r_prev_marginals = t_ctx.r_prev_marginals;
		const size_t i = t_ctx.i;
		// unwrap the contexts

		size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i);
		for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) {
			size_t n_row = r_prev_marginals.n_Block_Row(i, j);
			size_t n_row_base = r_prev_marginals.n_BlockColumn_Base(n_row);
			size_t n_rows = r_prev_marginals.n_BlockColumn_Column_Num(n_row); // is symmetric
			// dispatch on the (runtime) row height to reach the fully fixed-size inner loop
			fbs_ut::CWrap2<TInnerLoop, fbs_ut::CCTSize2D<n_omega_elems, n_cols> >::template
				In_RowHeight_DecisionTree_Given_ColumnWidth<_TyHessianMatrixBlockList,
				n_cols>(int(n_rows), TInnerContext(t_ctx, j, n_row_base, n_col_base));
			/*r_prev_marginals.t_Block_AtColumn(i, j) -=
				Tu_full.middleRows(n_row_base, n_rows).lazyProduct(Bu.middleCols(n_col_base, n_cols));
			//r_prev_marginals.t_Block_AtColumn(i, j) -=
			//	Tu_full.block(n_row_base, 0, n_rows, n_omega_elems).lazyProduct(
			//	Bu.block(0, n_col_base, n_omega_elems, n_cols));
			// maybe it would be faster using Tu_full? it makes no difference here.
			// could FBS these products*/
		}
	}
};

/**
 *	@brief the outer loop of the update; calculates Bu and Tu and calls the inner loops
 *
 *	@tparam n_omega_elems is size of packed omega matrix
 *	@tparam CSystemType is system type
 */
template <const int n_omega_elems, class CSystemType>
struct TOuterLoop {
	typedef typename CSystemType::_TyHessianMatrixBlockList _TyHessianMatrixBlockList; /**< @brief list of block sizes in lambda / omega / marginals */

	/**
	 *	@brief loop body implementation
	 *	@tparam _TyOuterContext is context type (it is specialized by timer sampler type)
	 *	@param[in] t_ctx is loop context
	 *	@note This function throws std::bad_alloc.
	 */
	template <class _TyOuterContext>
	static inline void Do(_TyOuterContext t_ctx) // throw(std::bad_alloc)
	{
		typename _TyOuterContext::_TyTimerSampler timer = t_ctx.r_t_timer_sampler;
		bool &r_b_result = t_ctx.r_b_result;
		const CUberBlockMatrix &lambda = t_ctx.r_lambda_in_natural_order;
		CUberBlockMatrix &r_prev_marginals = t_ctx.r_prev_marginals;
		const CUberBlockMatrix &omega_slim = t_ctx.r_omega_slim;
		const std::vector<size_t> &required_column_list = t_ctx.r_required_column_list;
		const CUberBlockMatrix &r_R = t_ctx.r_R;
		const CMatrixOrdering &mord = t_ctx.r_mord;
		const bool b_update_diag_only = t_ctx.b_update_diag_only;
		// unwrap the context

		typename _TyOuterContext::_TyTimerSampler::_TySample f_omega_time = 0;
		typename _TyOuterContext::_TyTimerSampler::_TySample f_dense_margs_time = 0;
		typename _TyOuterContext::_TyTimerSampler::_TySample f_update_basis_time = 0;
		typename _TyOuterContext::_TyTimerSampler::_TySample f_update_time = 0;
		typename _TyOuterContext::_TyTimerSampler::_TySample f_extend_time = 0;
		// t_odo - is this even legal if _TyOuterContext::_TyTimerSampler is a reference to a type?
		// no, it is not legal, gives a "... is not a class or type name" error.
		// the reference needs to be stripped.

		Eigen::Matrix<double, n_omega_elems, n_omega_elems> omega_dense;
		omega_slim.Convert_to_Dense(omega_dense);
		// initialize the fixed-size matrix

		omega_dense.template triangularView<Eigen::StrictlyLower>() =
			omega_dense.template triangularView<Eigen::StrictlyUpper>().transpose();
		// need both halves! (block matrix omega doesn't contain its lower triangular part)

		timer.Accum_DiffSample(f_omega_time);

		const size_t n_cur_state_size = lambda.n_Column_Num();
		const size_t n_prev_state_size = r_prev_marginals.n_Column_Num();
		const size_t n_prev_state_block_num = r_prev_marginals.n_BlockColumn_Num();
		//const size_t n_omega_elems = omega_slim.n_Column_Num(); // compile-time size now
		const size_t n_packed_block_column_num = omega_slim.n_BlockColumn_Num(); // depends on how n-ary the edge is, currently this is always 2 in here

		Eigen::Matrix<double, Eigen::Dynamic, n_omega_elems> Tu_full(n_cur_state_size, n_omega_elems);
		// do not allocate it smaller, will need these to update the new covs!

		_ASSERTE(n_packed_block_column_num <= INT_MAX);
		int _n_packed_block_column_num = int(n_packed_block_column_num); // signed counter for the OpenMP loop below
		#pragma omp parallel for if(n_prev_state_block_num > 1000)
		for(int i = 0; i < _n_packed_block_column_num; ++ i) { // t_odo - could run in parallel, but usually needs like two to six columns (threads)
			size_t n_block_base_margs = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals
			size_t n_block_base_Tu = omega_slim.n_BlockColumn_Base(i);
			size_t n_block_cols = omega_slim.n_BlockColumn_Column_Num(i);
			// get dimensions of this block

			CMarginals::Calculate_SubblockMarginals_Fast_ColumnBand_FBS<
				typename CSystemType::_TyHessianMatrixBlockList>(
				Tu_full.block(0, n_block_base_Tu, n_cur_state_size, n_block_cols), r_R,
				n_block_base_margs, mord.p_Get_InverseOrdering(), mord.n_Ordering_Size(),
				mord.n_Ordering_Size()/*n_prev_state_block_num*/);
			// really calculate a block of dense marginals
		}

		Eigen::Block<Eigen::Matrix<double, Eigen::Dynamic, n_omega_elems>,
			Eigen::Dynamic, n_omega_elems> Tu = Tu_full.topRows(n_prev_state_size);
		// assemble Tu (wow, look at *that* type!)

		Eigen::Matrix<double, n_omega_elems, n_omega_elems> s;
		for(size_t i = 0; i < n_packed_block_column_num; ++ i) {
			size_t n_block_base_row_Tu = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals
			size_t n_block_base_row_s = omega_slim.n_BlockColumn_Base(i);
			size_t n_block_rows = omega_slim.n_BlockColumn_Column_Num(i); // is symmetric
			// get dimensions of this block

			if(n_block_base_row_Tu < n_prev_state_size) {
				s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems) =
					Tu.block(n_block_base_row_Tu, 0, n_block_rows, n_omega_elems);
			} else
				s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems).setZero();
			// copy block from Tu to s
		}
		// cut out s (could be performed inside a sparse block matrix
		// multiplication, except now we don't have the data in a block matrix)

		timer.Accum_DiffSample(f_dense_margs_time);

#ifdef _DEBUG
		Eigen::LLT<Eigen::Matrix<double, n_omega_elems, n_omega_elems>, Eigen::Upper> llt(omega_dense);
		if(llt.info() == Eigen::Success)
			printf("debug: was able to use the first LL^T Cholesky\n"); // does this ever happen?
		// this happens a
#endif // _DEBUG

		Eigen::Matrix<double, n_omega_elems, n_omega_elems> V =
			Eigen::Matrix<double, n_omega_elems, n_omega_elems>::Identity() - s * omega_dense;
		// calculate V
		// t_odo - do more manual inversion (with LU?) of V,
		// in case it is not invertible, refrain to use batch marginals

		Eigen::FullPivLU<Eigen::Matrix<double, n_omega_elems, n_omega_elems> > luV(V);
		if(!luV.isInvertible()) {
			r_b_result = false;
			return;
		}
		// that's a runtime err (note this returns before the buffers below are
		// allocated, so there is nothing to free here)

		typedef forward_allocated_pool<double, 0, 64> CAlloc;
		enum { n_omega_stride = n_Align_Up_POT_Static(n_omega_elems, 8) };
		double *p_bu_buffer = (double*)CAlloc::aligned_alloc(n_omega_stride * n_prev_state_size * sizeof(double));
		double *p_tut_buffer = (double*)CAlloc::aligned_alloc(n_omega_stride * n_prev_state_size * sizeof(double));
		// t_odo - try to increase n_omega_elems to the next multiple of 8
		// NOTE(review): if anything below throws (e.g. std::bad_alloc), these two
		// buffers leak; consider an RAII holder — TODO confirm exception policy

		Eigen::Map<Eigen::Matrix<double, n_omega_elems, Eigen::Dynamic>, Eigen::Aligned,
			Eigen::OuterStride<n_omega_stride> > Bu(p_bu_buffer, n_omega_elems, n_prev_state_size);
		Eigen::Map<Eigen::Matrix<double, n_omega_elems, Eigen::Dynamic>, Eigen::Aligned,
			Eigen::OuterStride<n_omega_stride> > TuT(p_tut_buffer, n_omega_elems, n_prev_state_size);
		TuT = Tu.transpose(); // keep this as well, for a better memory locality
		Bu = (omega_dense * luV.inverse()) * TuT;
		// t_odo - produce the symmetrical product; or double the memory and have e.g. right side
		// of the product multiply Tu (that way at least the computation is saved, if not storage)

		timer.Accum_DiffSample(f_update_basis_time);

		_ASSERTE(n_prev_state_block_num <= INT_MAX);
		int _n_prev_state_block_num = int(n_prev_state_block_num);
		#pragma omp parallel for if(n_prev_state_block_num > 1000)
		for(int i = 0; i < _n_prev_state_block_num; ++ i) { // t_odo - this needs to run in parallel
			size_t n_cols = r_prev_marginals.n_BlockColumn_Column_Num(i);
			if(b_update_diag_only) {
				size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i);
				// only the diagonal block (i, i) of this column is updated
				fbs_ut::CWrap2<TInnerLoop, fbs_ut::CCTSize2D<n_omega_elems, -1> >::template
					In_ColumnWidth_DecisionTree<_TyHessianMatrixBlockList>(int(n_cols),
					TInnerContext(TMiddleContext(r_prev_marginals, i, n_cur_state_size,
					n_prev_state_size, p_tut_buffer/*Tu_full.data()*/, p_bu_buffer/*Bu.data()*/),
					i, n_col_base, n_col_base));
				/*r_prev_marginals.t_GetBlock_Log(i, i) -=
					Tu_full.middleRows(n_col_base, n_cols).lazyProduct(Bu.middleCols(n_col_base, n_cols));
				// maybe it would be faster using Tu_full? it makes no difference here.
				// t_odo - could FBS these products*/
			} else {
				typedef typename MakeTypelist(fbs_ut::CCTSize<n_omega_elems>,
					_TyHessianMatrixBlockList) _TySecondaryContext;
				fbs_ut::CWrap2<TMiddleLoop, _TySecondaryContext>::template
					In_ColumnWidth_DecisionTree<_TyHessianMatrixBlockList>(int(n_cols),
					TMiddleContext(r_prev_marginals, i, n_cur_state_size, n_prev_state_size,
					p_tut_buffer/*Tu_full.data()*/, p_bu_buffer/*Bu.data()*/));
				/*size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i);
				for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) {
					size_t n_row = r_prev_marginals.n_Block_Row(i, j);
					size_t n_row_base = r_prev_marginals.n_BlockColumn_Base(n_row);
					size_t n_rows = r_prev_marginals.n_BlockColumn_Column_Num(n_row); // is symmetric
					r_prev_marginals.t_Block_AtColumn(i, j) -=
						Tu_full.middleRows(n_row_base, n_rows).lazyProduct(Bu.middleCols(n_col_base, n_cols));
					//r_prev_marginals.t_Block_AtColumn(i, j) -=
					//	Tu_full.block(n_row_base, 0, n_rows, n_omega_elems).lazyProduct(
					//	Bu.block(0, n_col_base, n_omega_elems, n_cols));
					// maybe it would be faster using Tu_full? it makes no difference here.
					// could FBS these products
				}*/
			}
		}
		// update (actually "minus downdate") the existing blocks; in order to check,
		// however, need to update all the blocks that are already there

		timer.Accum_DiffSample(f_update_time);

		size_t n_new_vertex_num = lambda.n_BlockColumn_Num() - n_prev_state_block_num;
		for(size_t i = 0; i < n_new_vertex_num; ++ i) { // can't run in parallel, changes the matrix layout
			size_t n_vertex = n_prev_state_block_num + i;
			size_t n_dim = lambda.n_BlockColumn_Column_Num(n_vertex);
			size_t n_block_col_in_Tu = std::find(required_column_list.begin(),
				required_column_list.end(), n_vertex) - required_column_list.begin();
			size_t n_col_in_Tu = omega_slim.n_BlockColumn_Base(n_block_col_in_Tu);
			size_t n_row_in_Tu = lambda.n_BlockColumn_Base(n_vertex);
			// see which vertex it is in Tu

			/*r_prev_marginals.ExtendTo(r_prev_marginals.n_Row_Num() + n_dim,
				r_prev_marginals.n_Column_Num() + n_dim); // debug - just enlarge, no new blocks*/
			r_prev_marginals.t_GetBlock_Log(n_vertex, n_vertex, n_dim, n_dim, true, false) =
				Tu_full.block(n_row_in_Tu, n_col_in_Tu, n_dim, n_dim); // amazingly correct
			// need Tu_full, the blocks are not present in just Tu
			// todo - FBS this (low prio, just a copy)
		}
		// put there the new blocks

		timer.Accum_DiffSample(f_extend_time);

		typename _TyOuterContext::_TyTimerSampler::_TySample f_total_time = 0;
		timer.Accum_CumTime_LastSample(f_total_time);
		if(!CEqualType<typename _TyOuterContext::_TyTimerSampler, CVoidTimerSampler>::b_result) {
			printf("marginals update took %.5f msec\n", f_total_time * 1000.0);
			printf("\tomega: %.5f msec\n", f_omega_time * 1000.0);
			printf("\tTu, s: %.5f msec\n", f_dense_margs_time * 1000.0);
			printf("\tbasis: %.5f msec\n", f_update_basis_time * 1000.0);
			printf("\t  upd: %.5f msec\n", f_update_time * 1000.0);
			printf("\t  ext: %.5f msec\n", f_extend_time * 1000.0);
		}

		CAlloc::aligned_free(p_bu_buffer);
		CAlloc::aligned_free(p_tut_buffer);

		r_b_result = true; // all ok
	}
};

public:
	/**
	 *	@brief incrementally updates sparse blocky
marginals on diagonal
	 *		or everywhere (version with fixed block size)
	 *
	 *	@param[in,out] timer is timer sampler (for profiling)
	 *	@param[in] omega_slim is reordered and packed omega
	 *	@param[in] required_column_list is a list of vertices in omega
	 *	@param[in,out] r_prev_marginals is reference to the marginals to be updated (must be in natural order)
	 *	@param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order)
	 *	@param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order
	 *	@param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R
	 *	@param[in] b_update_diag_only is diagonal update flag (if set, only the diagonal is updated)
	 *
	 *	@return Returns true on success, false on numerical issue
	 *		(in that case, r_prev_marginals is not modified).
	 */
	template <class CSystemType, class _TyTimerSamplerRef>
	static bool Run(_TyTimerSamplerRef timer, const CUberBlockMatrix &omega_slim,
		const std::vector<size_t> &required_column_list, CUberBlockMatrix &r_prev_marginals,
		const CUberBlockMatrix &r_lambda_in_natural_order, const CUberBlockMatrix &r_R,
		const CMatrixOrdering &mord, bool b_update_diag_only) // throw(std::bad_alloc)
	{
		const size_t n_omega_elems = omega_slim.n_Column_Num();
		typedef typename CTransformTypelist<typename CSystemType::_TyEdgeTypelist,
			CEdgeTypeToSumOfVertexDims>::_TyResult TEdgeVertsSizeList; // vertex sizes of those
		bool b_result; // set by TOuterLoop::Do() via the context
		_ASSERTE(n_omega_elems <= INT_MAX);
		fbs_ut::CWrap2<TOuterLoop, CSystemType>::template
			In_ScalarSize_DecisionTree<TEdgeVertsSizeList>(int(n_omega_elems),
			TOuterContext<_TyTimerSamplerRef>(b_result, timer, omega_slim,
			required_column_list, r_prev_marginals, r_lambda_in_natural_order,
			r_R, mord, b_update_diag_only));
		// enter decision tree

		return b_result;
		// todo - move the decision tree code here
	}
};

#if defined(__SE_TYPES_SUPPORT_L_SOLVERS) || 1
#if 0 // this was replaced by a more intelligent function

/**
 *	@brief incrementally
updates sparse blocky marginals on diagonal
 *		or everywhere (version with fixed block size)
 *
 *	@tparam b_enable_timer is timing enable flag (if disabled, incurs no runtime costs)
 *	@tparam CSystemType is optimized system type (for optimized FBS calls)
 *
 *	@param[in] system is reference to the optimized system
 *	@param[in,out] r_prev_marginals is reference to the marginals to be updated (must be in natural order)
 *	@param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order)
 *	@param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order
 *	@param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R
 *	@param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals
 *	@param[in] b_update_diag_only is diagonal update flag (if set, only the diagonal is updated)
 *
 *	@return Returns true on success, false on numerical issue
 *		(in that case, r_prev_marginals is not modified).
 *
 *	@note This function throws std::bad_alloc.
 */
// *	@note This is only available if __SE_TYPES_SUPPORT_L_SOLVERS is defined (the Omega matrix is used).
template <bool b_enable_timer, class CSystemType>
static bool Update_BlockDiagonalMarginals_FBS(const CSystemType &system,
	CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order,
	const CUberBlockMatrix &r_R, const CMatrixOrdering &mord,
	size_t n_edges_in_prev_marginals, bool b_update_diag_only = false) // throw(std::bad_alloc)
{
	typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSampler _TyTimerSampler;
	typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSamplerRef _TyTimerSamplerRef;
	typedef typename _TyTimerSampler::_TySample _TySample;
	// choose a timer based on whether it is enabled or not

	CTimer t;
	_TyTimerSampler timer(t);

	const CUberBlockMatrix &lambda = r_lambda_in_natural_order; // rename
	const size_t n_prev_edge_num = n_edges_in_prev_marginals;
	size_t n_edge_num = system.r_Edge_Pool().n_Size();
	if(n_prev_edge_num == n_edge_num)
		return true; // another job well done

	size_t n_order_min = r_prev_marginals.n_BlockColumn_Num();
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) {
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/);
		// note that these are ids, but these equal order at the moment
	}
	size_t n_elem_order_min = r_prev_marginals.n_BlockColumn_Base(n_order_min);
	// no ordering here, that is correct (omega is not ordered either)

	CUberBlockMatrix omega; // todo - this might be up to the linear solver, in LM omega will be different
	std::vector<size_t> required_column_list;
	required_column_list.reserve(2 * (n_edge_num - n_prev_edge_num)); // a guess; might be edges with more or less than 2 verts
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) { // not parallel! (would have conflicts)
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		r_edge.Calculate_Omega(omega, n_elem_order_min);
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			required_column_list.push_back(r_edge.n_Vertex_Id(j));
	}
	// get omega and vertex id's

	std::sort(required_column_list.begin(), required_column_list.end());
	required_column_list.erase(std::unique(required_column_list.begin(),
		required_column_list.end()), required_column_list.end());
	// finalize the required column list (could use std::set, but that feels like overkill)

	std::vector<size_t> pack_order;
	pack_order.reserve(omega.n_BlockColumn_Num());
	for(size_t i = 0, n = required_column_list.size(); i < n; ++ i) {
		size_t n_col = required_column_list[i];
		size_t n_base = lambda.n_BlockColumn_Base(n_col) - n_elem_order_min;
		size_t n_col_size;
		size_t n_col_omega = omega.n_Find_BlockColumn(n_base, n_col_size);
		_ASSERTE(n_col_size == lambda.n_BlockColumn_Column_Num(n_col));
		pack_order.push_back(n_col_omega);
	}
	// get numbers of "interesting" columns of omega (no ordering here either)

	const size_t n_packed_block_column_num = pack_order.size(); // remember this
	for(size_t i = 1; i < n_packed_block_column_num; ++ i) {
		size_t n_o0 = pack_order[i - 1];
		size_t n_o1 = pack_order[i];
		_ASSERTE(n_o1 > n_o0); // should be sorted
		for(size_t j = n_o0 + 1; j < n_o1; ++ j)
			pack_order.push_back(j); // append the rest of the cols at the end (only up to omega.n_BlockColumn_Num() / 2 of them)
	}
	_ASSERTE(pack_order.size() == omega.n_BlockColumn_Num());
	// finalize the order

	std::vector<size_t> inv_pack_order(pack_order.size());
	for(size_t i = 0, n = pack_order.size(); i < n; ++ i)
		inv_pack_order[pack_order[i]] = i;
	// inverse the order

	CUberBlockMatrix omega_slim;
	omega.Permute_UpperTriangular_To(omega_slim, &inv_pack_order[0], inv_pack_order.size(), true);
	// pack omega

	omega_slim.SliceTo(omega_slim, n_packed_block_column_num, n_packed_block_column_num, true);
	_ASSERTE(omega_slim.n_BlockColumn_Num() == n_packed_block_column_num);
	// slice off the empty columns

	// NOTE(review): this condition looks inverted — after the early return on
	// n_prev_edge_num == n_edge_num above, n_prev_edge_num < n_edge_num always
	// holds, so n_prev_edge_num == n_edge_num + 1 can never be true; the intent
	// was presumably n_edge_num == n_prev_edge_num + 1 (exactly one new edge).
	// this is dead code under #if 0, but confirm before reviving it.
	if(n_prev_edge_num == n_edge_num + 1) {
		const size_t n_omega_elems = omega_slim.n_Column_Num();
#ifdef _DEBUG
		{
			size_t n_edge_dim = 0;
			typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[n_prev_edge_num];
			for(size_t i = 0, n = r_edge.n_Vertex_Num(); i < n; ++ i)
				n_edge_dim += system.r_Vertex_Pool()[r_edge.n_Vertex_Id(i)].n_Dimension();
			_ASSERTE(n_edge_dim == n_omega_elems);
		}
		// edge dimension should equal sum of dimensions of its vertices
#endif // _DEBUG
		return CMarginalsUpdate_FBSKernel::template Run<CSystemType,
			_TyTimerSamplerRef>(timer, omega_slim, required_column_list, r_prev_marginals,
			r_lambda_in_natural_order, r_R, mord, b_update_diag_only);
		/*typedef typename CTransformTypelist<typename CSystemType::_TyEdgeTypelist,
			CMarginalsUpdate_FBSKernel::CEdgeTypeToSumOfVertexDims>::_TyResult TEdgeVertsSizeList; // vertex sizes of those
		bool b_result;
		_ASSERTE(n_omega_elems <= INT_MAX);
		fbs_ut::CWrap2<CMarginalsUpdate_FBSKernel::TOuterLoop, CSystemType>::template
			In_ScalarSize_DecisionTree<TEdgeVertsSizeList>(int(n_omega_elems),
			CMarginalsUpdate_FBSKernel::TOuterContext<_TyTimerSamplerRef>(b_result,
			timer, omega_slim, required_column_list, r_prev_marginals,
			r_lambda_in_natural_order, r_R, mord, b_update_diag_only));
		// entere decision tree

		return b_result;*/
	}
	// not proficient; t_odo - make a fake block matrix to hold all the dense mats (Tu. Bu, S, ...) so that they are aligned!
	// handle FBS processing (one dimension of Tu and Bu, and both dimensions
	// of both matrices are known at compile-time)

	Eigen::MatrixXd omega_dense;
	omega_slim.Convert_to_Dense(omega_dense);
	// get dense omega

	//omega_dense.bottomRightCorner(omega_dense.rows() - omega_slim.n_BlockColumn_Column_Num(0),
	//	omega_dense.cols() - omega_slim.n_BlockColumn_Column_Num(0)).diagonal().array() += .1;
	// slightly lift the diagonal (except the first vertex), like it would happen in damped least squares

	_TySample f_omega_time = 0;
	_TySample f_dense_margs_time = 0;
	_TySample f_update_basis_time = 0;
	_TySample f_update_time = 0;
	_TySample f_extend_time = 0;

	timer.Accum_DiffSample(f_omega_time);

	const size_t n_cur_state_size = lambda.n_Column_Num();
	const size_t n_prev_state_size = r_prev_marginals.n_Column_Num();
	const size_t n_prev_state_block_num = r_prev_marginals.n_BlockColumn_Num();
	const size_t n_omega_elems = omega_slim.n_Column_Num();

	Eigen::MatrixXd Tu_full(n_cur_state_size, n_omega_elems);
	// do not allocate it smaller, will need these to update the new covs!

	_ASSERTE(n_packed_block_column_num <= INT_MAX);
	int _n_packed_block_column_num = int(n_packed_block_column_num);
	#pragma omp parallel for if(n_prev_state_block_num > 1000)
	for(int i = 0; i < _n_packed_block_column_num; ++ i) { // t_odo - could run in parallel, but usually needs like two to six columns (threads)
		size_t n_block_base_margs = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals
		size_t n_block_base_Tu = omega_slim.n_BlockColumn_Base(i);
		size_t n_block_cols = omega_slim.n_BlockColumn_Column_Num(i);
		// get dimensions of this block

		CMarginals::Calculate_SubblockMarginals_Fast_ColumnBand_FBS<
			typename CSystemType::_TyHessianMatrixBlockList>(
			Tu_full.block(0, n_block_base_Tu, n_cur_state_size, n_block_cols), r_R,
			n_block_base_margs, mord.p_Get_InverseOrdering(), mord.n_Ordering_Size(),
			mord.n_Ordering_Size()/*n_prev_state_block_num*/);
		// really calculate a block of dense marginals
	}

	Eigen::Block<Eigen::MatrixXd> Tu = Tu_full.topLeftCorner(n_prev_state_size, n_omega_elems);
	// assemble Tu

	Eigen::MatrixXd s(n_omega_elems, n_omega_elems);
	for(size_t i = 0; i < n_packed_block_column_num; ++ i) {
		size_t n_block_base_row_Tu = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals
		size_t n_block_base_row_s = omega_slim.n_BlockColumn_Base(i);
		size_t n_block_rows = omega_slim.n_BlockColumn_Column_Num(i); // is symmetric
		// get dimensions of this block

		if(n_block_base_row_Tu < n_prev_state_size) {
			s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems) =
				Tu.block(n_block_base_row_Tu, 0, n_block_rows, n_omega_elems);
		} else
			s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems).setZero();
		// copy block from Tu to s
	}
	// cut out s (could be performed inside a sparse block matrix
	// multiplication, except now we don't have the data in a block matrix)

	timer.Accum_DiffSample(f_dense_margs_time);

	/*#ifdef _DEBUG
	Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> llt(omega_dense);
	if(llt.info() == Eigen::Success)
		printf("debug: was able to use the first LL^T Cholesky\n"); // does this ever happen? it does, when one is very certain, typically when the covariance associated with the edges is high
	#endif // _DEBUG*/

	omega_dense.triangularView<Eigen::StrictlyLower>() =
		omega_dense.triangularView<Eigen::StrictlyUpper>().transpose();
	// need both halves! (block matrix omega doesn't contain its lower triangular part)

	Eigen::MatrixXd V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - s * omega_dense;
	// calculate V
	// t_odo - do more manual inversion (with LU?) of V,
	// in case it is not invertible, refrain to use batch marginals

	Eigen::FullPivLU<Eigen::MatrixXd> luV(V);
	if(!luV.isInvertible())
		return false; // that's a runtime err

	//Eigen::MatrixXd Bu = ((omega_dense * luV.inverse()) * Tu.transpose());
	// t_odo - produce the symmetrical product; or double the memory and have e.g. right side
	// of the product multiply Tu (that way at least the computation is saved, if not storage)

	typedef forward_allocated_pool<double, 0, 64> CAlloc;
	const size_t n_omega_stride = n_Align_Up_POT(n_omega_elems, size_t(8));
	double *p_tut_buffer = (double*)CAlloc::aligned_alloc(n_omega_stride * n_prev_state_size * sizeof(double));
	double *p_bu_buffer = (double*)CAlloc::aligned_alloc(n_omega_stride * n_prev_state_size * sizeof(double));
	// t_odo - try to increase n_omega_elems to the next multiple of 8
	Eigen::Map<Eigen::MatrixXd, Eigen::Aligned, Eigen::OuterStride<> >
		TuT(p_tut_buffer, n_omega_elems, n_prev_state_size, Eigen::OuterStride<>(n_omega_stride));
	Eigen::Map<Eigen::MatrixXd, Eigen::Aligned, Eigen::OuterStride<> >
		Bu(p_bu_buffer, n_omega_elems, n_prev_state_size, Eigen::OuterStride<>(n_omega_stride));
	TuT = Tu.transpose(); // keep this as well, for a better memory locality
	Bu = (omega_dense * luV.inverse()) * TuT;
	// t_odo - produce the symmetrical product; or double the memory and have e.g. right side
	// of the product multiply Tu (that way at least the computation is saved, if not storage)

	timer.Accum_DiffSample(f_update_basis_time);

	_ASSERTE(n_prev_state_block_num <= INT_MAX);
	int _n_prev_state_block_num = int(n_prev_state_block_num);
	#pragma omp parallel for if(n_prev_state_block_num > 1000)
	for(int i = 0; i < _n_prev_state_block_num; ++ i) { // t_odo - this needs to run in parallel
		size_t n_cols = r_prev_marginals.n_BlockColumn_Column_Num(i);
		if(b_update_diag_only) {
			size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i);
#if 0
			fbs_ut::CWrap2<CMarginalsUpdate_FBSKernel::TInnerLoop,
				fbs_ut::CCTSize2D<n_omega_elems, -1> >::template
				In_ColumnWidth_DecisionTree<_TyHessianMatrixBlockList>(int(n_cols),
				CMarginalsUpdate_FBSKernel::TInnerContext(
				CMarginalsUpdate_FBSKernel::TMiddleContext(r_prev_marginals, i,
				n_cur_state_size, n_prev_state_size, p_tut_buffer/*Tu_full.data()*/,
				p_bu_buffer/*Bu.data()*/), i, n_col_base, n_col_base));
#endif // 0
			// n_omega_elems is not constant, would have to rewrite this a bit (it wouldn't be completely FBS)

			// NOTE(review): this loop destroys all off-diagonal blocks of the column —
			// clearly a debug aid ("so that i can see it"), not production behavior
			for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) {
				if(r_prev_marginals.n_Block_Row(i, j) == i)
					continue; // not the diag block
				r_prev_marginals.t_Block_AtColumn(i, j).setZero();
			}
			// zero out other blocks so that i can see it

			r_prev_marginals.t_GetBlock_Log(i, i) -=
				TuT.middleCols(n_col_base, n_cols).transpose().lazyProduct(Bu.middleCols(n_col_base, n_cols));
			// maybe it would be faster using Tu_full? it makes no difference here.
			// t_odo - could FBS these products*/
		} else {
#if 0
			typedef typename MakeTypelist(fbs_ut::CCTSize<n_omega_elems>,
				_TyHessianMatrixBlockList) _TySecondaryContext;
			fbs_ut::CWrap2<CMarginalsUpdate_FBSKernel::TMiddleLoop, _TySecondaryContext>::template
				In_ColumnWidth_DecisionTree<_TyHessianMatrixBlockList>(int(n_cols),
				CMarginalsUpdate_FBSKernel::TMiddleContext(r_prev_marginals, i,
				n_cur_state_size, n_prev_state_size, p_tut_buffer/*Tu_full.data()*/,
				p_bu_buffer/*Bu.data()*/));
#endif // 0
			// n_omega_elems is not constant, would have to rewrite this a bit (it wouldn't be completely FBS)

			size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i);
			for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) {
				size_t n_row = r_prev_marginals.n_Block_Row(i, j);
				size_t n_row_base = r_prev_marginals.n_BlockColumn_Base(n_row);
				size_t n_rows = r_prev_marginals.n_BlockColumn_Column_Num(n_row); // is symmetric
				r_prev_marginals.t_Block_AtColumn(i, j) -=
					TuT.middleCols(n_row_base, n_rows).transpose().lazyProduct(Bu.middleCols(n_col_base, n_cols));
				//r_prev_marginals.t_Block_AtColumn(i, j) -=
				//	Tu_full.block(n_row_base, 0, n_rows, n_omega_elems).lazyProduct(
				//	Bu.block(0, n_col_base, n_omega_elems, n_cols));
				// maybe it would be faster using Tu_full? it makes no difference here.
				// could FBS these products
			}
		}
	}
	// update (actually "minus downdate") the existing blocks; in order to check,
	// however, need to update all the blocks that are already there

	timer.Accum_DiffSample(f_update_time);

	size_t n_new_vertex_num = lambda.n_BlockColumn_Num() - n_prev_state_block_num;
	for(size_t i = 0; i < n_new_vertex_num; ++ i) { // can't run in parallel, changes the matrix layout
		size_t n_vertex = n_prev_state_block_num + i;
		size_t n_dim = lambda.n_BlockColumn_Column_Num(n_vertex);
		size_t n_block_col_in_Tu = std::find(required_column_list.begin(),
			required_column_list.end(), n_vertex) - required_column_list.begin();
		size_t n_col_in_Tu = omega_slim.n_BlockColumn_Base(n_block_col_in_Tu);
		size_t n_row_in_Tu = lambda.n_BlockColumn_Base(n_vertex);
		// see which vertex it is in Tu

		/*r_prev_marginals.ExtendTo(r_prev_marginals.n_Row_Num() + n_dim,
			r_prev_marginals.n_Column_Num() + n_dim); // debug - just enlarge, no new blocks*/
		r_prev_marginals.t_GetBlock_Log(n_vertex, n_vertex, n_dim, n_dim, true, false) =
			Tu_full.block(n_row_in_Tu, n_col_in_Tu, n_dim, n_dim); // amazingly correct
		// need Tu_full, the blocks are not present in just Tu
	}
	// put there the new blocks

	CAlloc::aligned_free(p_bu_buffer);
	CAlloc::aligned_free(p_tut_buffer);
	// should free in catch(bad_alloc) as well, will leave memory allocated if something throws

	timer.Accum_DiffSample(f_extend_time);

	_TySample f_total_time = 0;
	timer.Accum_CumTime_LastSample(f_total_time);
	if(b_enable_timer) {
		printf("marginals update took %.5f msec\n", f_total_time * 1000.0);
		printf("\tomega: %.5f msec\n", f_omega_time * 1000.0);
		printf("\tTu, s: %.5f msec\n", f_dense_margs_time * 1000.0);
		printf("\tbasis: %.5f msec\n", f_update_basis_time * 1000.0);
		printf("\t  upd: %.5f msec\n", f_update_time * 1000.0);
		printf("\t  ext: %.5f msec\n", f_extend_time * 1000.0);
	}

	return true;
}

#endif // 0

/**
 *	@brief calculates approximate number of FLOPs which will be spent in sparse
 *		calculation of marginals using the recursive
formula */ static double f_RecursiveFormula_FLOP_Num(const CUberBlockMatrix &r_R, EBlockMatrixPart n_matrix_part) { if(n_matrix_part == mpart_Nothing) return 0; size_t nnz = r_R.n_NonZero_Num(), N = r_R.n_Column_Num(); if((n_matrix_part & mpart_Diagonal) != mpart_Diagonal) { return (nnz)? double(nnz) * r_R.n_BlockColumn_Column_Num(r_R.n_BlockColumn_Num() - 1) : 0; // very approximate cost for the last column / block using backsubstitution } // make sure that the diagonal is required; otherwise the cost is quite different return double(nnz) * nnz * N; // O(n_{nz}^2N) as in chapter IV of the paper // cost for calculating the skeleton marginals (the sparsity pattern of R), // which should be the dominating cost right now } /** * @brief calculates approximate number of FLOPs which will be spent in dense calculation in marginals update * * @tparam CSystemType is optimized system type (for optimized FBS calls) * * @param[in] system is reference to the optimized system * @param[in] r_prev_marginals is reference to the marginals to be updated (must be in natural order) * @param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order) * @param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order * @param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R (unused * at the moment, but could be useful to get a more precise approximate taking resumed * backsubstitution into account) * @param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals * @param[in] n_matrix_part is combination of EBlockMatrixPart * * @return Returns true on success, false on numerical issue * (in that case, r_prev_marginals is not modified). * * @note This function throws std::bad_alloc. 
 */
template <class CSystemType>
static double f_MarginalsUpdate_DenseFLOP_Num(const CSystemType &system,
	const CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order,
	const CUberBlockMatrix &r_R, const CMatrixOrdering &UNUSED(mord),
	size_t n_edges_in_prev_marginals, EBlockMatrixPart n_matrix_part) // throw(std::bad_alloc)
{
	const CUberBlockMatrix &lambda = r_lambda_in_natural_order; // rename
	const size_t n_prev_edge_num = n_edges_in_prev_marginals;
	size_t n_edge_num = system.r_Edge_Pool().n_Size();
	if(n_prev_edge_num == n_edge_num)
		return 0;
	if(n_matrix_part == mpart_Nothing)
		return 0;
	// free cases (no new edges to incorporate, or no part of the matrix requested)

	//size_t n_backsubst_cost = r_R.n_Storage_Size(); // a quick and dirty approximate of NNZs
	size_t n_backsubst_cost = r_R.n_NonZero_Num();
	// approximate cost of backsubstituting a single column through R

	if((n_matrix_part == mpart_LastBlock || n_matrix_part == mpart_LastColumn) &&
	   lambda.n_BlockColumn_Num() > r_prev_marginals.n_BlockColumn_Num()) {
		size_t n_new_column_num = lambda.n_BlockColumn_Num() - r_prev_marginals.n_BlockColumn_Num();
		return double(n_new_column_num) * n_backsubst_cost; // double so we don't have to care about overflows (as much)
	}
	// pure extension cannot be updated; only the new columns would be backsubstituted

#if 0
	size_t n_order_min = r_prev_marginals.n_BlockColumn_Num();
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) {
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/);
		// note that these are ids, but these equal order at the moment
	}
	size_t n_elem_order_min = r_prev_marginals.n_BlockColumn_Base(n_order_min);
	// no ordering here, that is correct (omega is not ordered either)
#endif // 0
	// not needed, we don't form omega here

	std::vector<size_t> required_column_list;
	required_column_list.reserve(2 * (n_edge_num - n_prev_edge_num)); // a guess; might be edges with more or less than 2 verts
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) { // not parallel! (would have conflicts)
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			required_column_list.push_back(r_edge.n_Vertex_Id(j));
	}
	// collect the ids of the vertices touched by the new edges (no omega needed here,
	// unlike in the actual update)

	std::sort(required_column_list.begin(), required_column_list.end());
	required_column_list.erase(std::unique(required_column_list.begin(),
		required_column_list.end()), required_column_list.end());
	// finalize the required column list (could use std::set, but that feels like overkill)

	size_t n_Tu_full_column_num = 0;
	for(size_t i = 0, n = required_column_list.size(); i < n; ++ i)
		n_Tu_full_column_num += lambda.n_BlockColumn_Column_Num(required_column_list[i]);
	// calculate how many columns do we need to backsubstitute for

	return double(n_Tu_full_column_num) * n_backsubst_cost; // double so we don't have to care about overflows (as much)
}

/**
 *	@brief calculates approximate memory size required for dense calculation in marginals update
 *
 *	@tparam CSystemType is optimized system type (for optimized FBS calls)
 *
 *	@param[in] system is reference to the optimized system
 *	@param[in] r_prev_marginals is reference to the marginals to be updated (must be in natural order)
 *	@param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order)
 *	@param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order
 *	@param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R (unused
 *		at the moment, but could be useful to get a more precise approximate taking resumed
 *		backsubstitution into account)
 *	@param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals
 *	@param[in] n_matrix_part is combination of EBlockMatrixPart
 *
 *	@return Returns the approximate amount of memory, in bytes, required by the dense
 *		incremental update (returned as double so that large sizes do not overflow).
 *
 *	@note This function throws std::bad_alloc.
 */
template <class CSystemType>
static double f_MarginalsUpdate_DenseBytes(const CSystemType &system,
	const CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order,
	const CUberBlockMatrix &r_R, const CMatrixOrdering &UNUSED(mord),
	size_t n_edges_in_prev_marginals, EBlockMatrixPart n_matrix_part) // throw(std::bad_alloc)
{
	const CUberBlockMatrix &lambda = r_lambda_in_natural_order; // rename
	const size_t n_prev_edge_num = n_edges_in_prev_marginals;
	size_t n_edge_num = system.r_Edge_Pool().n_Size();
	if(n_prev_edge_num == n_edge_num)
		return 0;
	if(n_matrix_part == mpart_Nothing)
		return 0;
	// free cases (no new edges to incorporate, or no part of the matrix requested)

	size_t n_row_size = r_R.n_Row_Num();
	// each backsubstituted column of the dense part has this many (dense double) elements

	if((n_matrix_part == mpart_LastBlock || n_matrix_part == mpart_LastColumn) &&
	   lambda.n_BlockColumn_Num() > r_prev_marginals.n_BlockColumn_Num()) {
		size_t n_new_column_num = lambda.n_BlockColumn_Num() - r_prev_marginals.n_BlockColumn_Num();
		return double(n_new_column_num) * sizeof(double) * n_row_size; // double so we don't have to care about overflows (as much)
	}
	// pure extension cannot be updated; only the new columns would be backsubstituted

#if 0
	size_t n_order_min = r_prev_marginals.n_BlockColumn_Num();
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) {
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/);
		// note that these are ids, but these equal order at the moment
	}
	size_t n_elem_order_min = r_prev_marginals.n_BlockColumn_Base(n_order_min);
	// no ordering here, that is correct (omega is not ordered either)
#endif // 0
	// not needed, we don't form omega here

	std::vector<size_t> required_column_list;
	required_column_list.reserve(2 * (n_edge_num - n_prev_edge_num)); // a guess; might be edges with more or less than 2 verts
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) { // not parallel! (would have conflicts)
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			required_column_list.push_back(r_edge.n_Vertex_Id(j));
	}
	// collect the ids of the vertices touched by the new edges (no omega needed here,
	// unlike in the actual update)

	std::sort(required_column_list.begin(), required_column_list.end());
	required_column_list.erase(std::unique(required_column_list.begin(),
		required_column_list.end()), required_column_list.end());
	// finalize the required column list (could use std::set, but that feels like overkill)

	size_t n_Tu_full_column_num = 0;
	for(size_t i = 0, n = required_column_list.size(); i < n; ++ i)
		n_Tu_full_column_num += lambda.n_BlockColumn_Column_Num(required_column_list[i]);
	// calculate how many columns do we need to backsubstitute for

	return double(n_Tu_full_column_num) * sizeof(double) * n_row_size; // double so we don't have to care about overflows (as much)
}

/**
 *	@brief a simple heuristic deciding whether it is better to update the marginals incrementally or batch
 *
 *	@tparam CSystemType is optimized system type (for optimized FBS calls)
 *
 *	@param[in] system is reference to the optimized system
 *	@param[in] r_prev_marginals is reference to the marginals to be updated (must be in natural order)
 *	@param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order)
 *	@param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order
 *	@param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R (unused
 *		at the moment, but could be useful to get a more precise approximate taking resumed
 *		backsubstitution into account)
 *	@param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals
 *	@param[in] n_matrix_part is combination of EBlockMatrixPart
 *
 *	@return Returns true if the incremental update is expected to be cheaper than
 *		a batch recalculation, otherwise returns false.
 *
 *	@note This function throws std::bad_alloc.
* @note The implementation of this function will almost certainly change. */ template <class CSystemType> static bool b_PreferIncremental(const CSystemType &system, const CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order, const CUberBlockMatrix &r_R, const CMatrixOrdering &mord, size_t n_edges_in_prev_marginals, EBlockMatrixPart n_matrix_part) // throw(std::bad_alloc) { double f_mem_cost = f_MarginalsUpdate_DenseBytes(system, r_prev_marginals, r_lambda_in_natural_order, r_R, mord, n_edges_in_prev_marginals, n_matrix_part); const double f_thresh = 1000000 * 64 * sizeof(double); return f_mem_cost < f_thresh; // for now just prefer based on memory use of the incremental update // allow update on 1M vertices with rank-64 update, or equivalent (512 MB worth of dense data) // the problem with f_MarginalsUpdate_DenseFLOP_Num() and f_RecursiveFormula_FLOP_Num() is that // while they somehow reflect the number of FLOPs, they are incomparable due to different constant // factors; will need to verify that these numbers indeed reflect the time spent there and estimate // the constant factors on the current hardware } /** * @brief incrementally updates sparse blocky marginals on diagonal * or everywhere (version with fixed block size) * * @tparam b_enable_timer is timing enable flag (if disabled, incurs no runtime costs) * @tparam CSystemType is optimized system type (for optimized FBS calls) * * @param[in] system is reference to the optimized system * @param[in,out] r_prev_marginals is reference to the marginals to be updated (must be in natural order) * @param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order) * @param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order * @param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R * @param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals * @param[in] n_matrix_part is 
combination of EBlockMatrixPart
 *
 *	@return Returns true on success, false on numerical issue
 *		(in that case, r_prev_marginals is not modified).
 *
 *	@note This function throws std::bad_alloc.
 */
// *	@note This is only available if __SE_TYPES_SUPPORT_L_SOLVERS is defined (the Omega matrix is used).
template <bool b_enable_timer, class CSystemType>
static bool Update_BlockDiagonalMarginals_FBS(const CSystemType &system,
	CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order,
	const CUberBlockMatrix &r_R, const CMatrixOrdering &mord,
	size_t n_edges_in_prev_marginals, EBlockMatrixPart n_matrix_part) // throw(std::bad_alloc)
{
	typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSampler _TyTimerSampler;
	typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSamplerRef _TyTimerSamplerRef;
	typedef typename _TyTimerSampler::_TySample _TySample;
	// choose a timer based on whether it is enabled or not

	typedef typename CSystemType::_TyHessianMatrixBlockList _TyLambdaMatrixBlockSizes;
	// get the block sizes

	const CUberBlockMatrix &lambda = r_lambda_in_natural_order; // rename
	const size_t n_prev_edge_num = n_edges_in_prev_marginals;
	size_t n_edge_num = system.r_Edge_Pool().n_Size();
	if(n_prev_edge_num == n_edge_num) {
		_ASSERTE(r_prev_marginals.b_EqualLayout(lambda)); // should have, unless someone meddled with it
		return true;
	}
	// another job well done, there is no source for the update

	if(n_matrix_part & mpart_Column)
		n_matrix_part = EBlockMatrixPart(n_MPart_Subtract(n_matrix_part, mpart_Column) | mpart_LastColumn);
	_ASSERTE((n_matrix_part & mpart_Column) != mpart_Column);
	// if a column is wanted, it is the last column in this context

	if(n_matrix_part == mpart_Nothing) {
		r_prev_marginals.ExtendTo(lambda.n_Row_Num(), lambda.n_Column_Num()); // so it has correct size
		return true;
	}
	// nothing to do

	if(n_matrix_part == mpart_LastBlock || n_matrix_part == mpart_LastColumn) {
		if(lambda.n_BlockColumn_Num() > r_prev_marginals.n_BlockColumn_Num()) {
			CUberBlockMatrix margs_ordered;
			CMarginals::Calculate_DenseMarginals_Recurrent_FBS<_TyLambdaMatrixBlockSizes>(margs_ordered, r_R,
				mord, /*(n_matrix_part == mpart_FullMatrix)? mpart_Nothing :*/ n_matrix_part, // mpart_FullMatrix interpreted as structure of R here (since this is an update)
				/*(n_matrix_part == mpart_FullMatrix)? true :*/ false); // meek point, as n_matrix_part == mpart_LastBlock || n_matrix_part == mpart_LastColumn
			// calculate the thing (always succeeds, except for std::bad_alloc)

			margs_ordered.Permute_UpperTriangular_To(r_prev_marginals, mord.p_Get_Ordering(),
				mord.n_Ordering_Size(), false); // no share! the original will be deleted
			// take care of having the correct permutation there

			return true;
		}
		// the last block or the last column can't be updated if the size of the matrix increased;
		// fall through to a full recalculation of the requested part below
	}

	CTimer t;
	_TyTimerSampler timer(t);

	size_t n_order_min = r_prev_marginals.n_BlockColumn_Num();
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) {
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/);
		// note that these are ids, but these equal order at the moment
	}
	size_t n_elem_order_min = r_prev_marginals.n_BlockColumn_Base(n_order_min);
	// no ordering here, that is correct (omega is not ordered either)

	CUberBlockMatrix omega; // t_odo - this might be up to the linear solver, in LM omega will be different
	// in LM, a new un-damped lambda needs to be built to calculate R, so it makes a little difference
	std::vector<size_t> required_column_list;
	required_column_list.reserve(2 * (n_edge_num - n_prev_edge_num)); // a guess; might be edges with more or less than 2 verts
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) { // not parallel! (would have conflicts)
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		r_edge.Calculate_Omega(omega, n_elem_order_min);
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			required_column_list.push_back(r_edge.n_Vertex_Id(j));
	}
	// get omega and vertex id's

	std::sort(required_column_list.begin(), required_column_list.end());
	required_column_list.erase(std::unique(required_column_list.begin(),
		required_column_list.end()), required_column_list.end());
	// finalize the required column list (could use std::set, but that feels like overkill)

	_TySample f_omega_build_time = 0;
	timer.Accum_DiffSample(f_omega_build_time);

	return Update_BlockDiagonalMarginals_FBS_ExOmega<b_enable_timer>(system, r_prev_marginals,
		r_lambda_in_natural_order, r_R, mord, n_edges_in_prev_marginals, n_matrix_part,
		f_omega_build_time, omega, required_column_list);
	// massif: +35,893,584B (via a matrix product) +35,893,584B (via a matrix product) +8,388,608B (via block matrix p_Find_Block())
}

/**
 *	@brief incrementally updates sparse blocky marginals on diagonal
 *		or everywhere (version with fixed block size), using explicitly calculated omega
 *
 *	@tparam b_enable_timer is timing enable flag (if disabled, incurs no runtime costs)
 *	@tparam CSystemType is optimized system type (for optimized FBS calls)
 *
 *	@param[in] system is reference to the optimized system
 *	@param[in,out] r_prev_marginals is reference to the marginals to be updated (must be in natural order)
 *	@param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order)
 *	@param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order
 *	@param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R
 *	@param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals
 *	@param[in] n_matrix_part is combination of EBlockMatrixPart
 *	@param[in] f_omega_build_time is time required to calculate the omega matrix, in seconds (for
profiling) * @param[in] omega is value of the omega matrix * @param[in] required_column_list is the list of columns of Sigma required for updating the marginals * * @return Returns true on success, false on numerical issue * (in that case, r_prev_marginals is not modified). * * @note This function throws std::bad_alloc. */ // * @note This is only available if __SE_TYPES_SUPPORT_L_SOLVERS is defined (the Omega matrix is used). template <bool b_enable_timer, class CSystemType> static bool Update_BlockDiagonalMarginals_FBS_ExOmega(const CSystemType &system, CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order, const CUberBlockMatrix &r_R, const CMatrixOrdering &mord, size_t n_edges_in_prev_marginals, EBlockMatrixPart n_matrix_part, double f_omega_build_time, const CUberBlockMatrix &omega, const std::vector<size_t> &required_column_list) // additional params to pass: { typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSampler _TyTimerSampler; typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSamplerRef _TyTimerSamplerRef; typedef typename _TyTimerSampler::_TySample _TySample; // choose a timer based on whether it is enabled or not typedef typename CSystemType::_TyHessianMatrixBlockList _TyLambdaMatrixBlockSizes; // get the block sizes const CUberBlockMatrix &lambda = r_lambda_in_natural_order; // rename const size_t n_prev_edge_num = n_edges_in_prev_marginals; const size_t n_edge_num = system.r_Edge_Pool().n_Size(); _ASSERTE(!required_column_list.empty()); const size_t n_order_min = required_column_list.front(); // basically contains ids of affected vertices, and is sorted const size_t n_elem_order_min = r_prev_marginals.n_BlockColumn_Base(n_order_min); // t_odo - we need to have omega; split this function ro two, with entry point where omega is already available CTimer t; _TyTimerSampler timer(t); std::vector<size_t> pack_order; pack_order.reserve(omega.n_BlockColumn_Num()); for(size_t i = 0, n = 
required_column_list.size(); i < n; ++ i) { size_t n_col = required_column_list[i]; size_t n_base = lambda.n_BlockColumn_Base(n_col) - n_elem_order_min; size_t n_col_size; size_t n_col_omega = omega.n_Find_BlockColumn(n_base, n_col_size); _ASSERTE(n_col_size == lambda.n_BlockColumn_Column_Num(n_col)); pack_order.push_back(n_col_omega); } // get numbers of "interesting" columns of omega (no ordering here either) const size_t n_packed_block_column_num = pack_order.size(); // remember this for(size_t i = 1; i < n_packed_block_column_num; ++ i) { size_t n_o0 = pack_order[i - 1]; size_t n_o1 = pack_order[i]; _ASSERTE(n_o1 > n_o0); // should be sorted for(size_t j = n_o0 + 1; j < n_o1; ++ j) pack_order.push_back(j); // append the rest of the cols at the end (only up to omega.n_BlockColumn_Num() / 2 of them) } _ASSERTE(pack_order.size() == omega.n_BlockColumn_Num()); // finalize the order std::vector<size_t> inv_pack_order(pack_order.size()); for(size_t i = 0, n = pack_order.size(); i < n; ++ i) inv_pack_order[pack_order[i]] = i; // inverse the order CUberBlockMatrix omega_slim; omega.Permute_UpperTriangular_To(omega_slim, &inv_pack_order[0], inv_pack_order.size(), true); // pack omega omega_slim.SliceTo(omega_slim, n_packed_block_column_num, n_packed_block_column_num, true); _ASSERTE(omega_slim.n_BlockColumn_Num() == n_packed_block_column_num); // slice off the empty columns if(n_prev_edge_num == n_edge_num + 1) { /*const size_t n_omega_elems = omega_slim.n_Column_Num(); #ifdef _DEBUG { size_t n_edge_dim = 0; typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[n_prev_edge_num]; for(size_t i = 0, n = r_edge.n_Vertex_Num(); i < n; ++ i) n_edge_dim += system.r_Vertex_Pool()[r_edge.n_Vertex_Id(i)].n_Dimension(); _ASSERTE(n_edge_dim == n_omega_elems); } // edge dimension should equal sum of dimensions of its vertices #endif // _DEBUG return CMarginalsUpdate_FBSKernel::template Run<CSystemType, _TyTimerSamplerRef>(timer, omega_slim, required_column_list, 
r_prev_marginals, r_lambda_in_natural_order, r_R, mord, n_matrix_part);*/ // todo - implement this static bool b_warn = false; if(!b_warn) { b_warn = true; fprintf(stderr, "warning: CMarginalsUpdate_FBSKernel::Run() does not honor marginal policy, not used\n"); // if this turns out to be performance bottleneck, must uncomment and rewrite the above code } /*typedef typename CTransformTypelist<typename CSystemType::_TyEdgeTypelist, CMarginalsUpdate_FBSKernel::CEdgeTypeToSumOfVertexDims>::_TyResult TEdgeVertsSizeList; // vertex sizes of those bool b_result; _ASSERTE(n_omega_elems <= INT_MAX); fbs_ut::CWrap2<CMarginalsUpdate_FBSKernel::TOuterLoop, CSystemType>::template In_ScalarSize_DecisionTree<TEdgeVertsSizeList>(int(n_omega_elems), CMarginalsUpdate_FBSKernel::TOuterContext<_TyTimerSamplerRef>(b_result, timer, omega_slim, required_column_list, r_prev_marginals, r_lambda_in_natural_order, r_R, mord, n_matrix_part)); // entere decision tree return b_result;*/ } // not proficient; t_odo - make a fake block matrix to hold all the dense mats (Tu. Bu, S, ...) so that they are aligned! 
// handle FBS processing (one dimension of Tu and Bu, and both dimensions // of both matrices are known at compile-time) Eigen::MatrixXd omega_dense; omega_slim.Convert_to_Dense(omega_dense); // get dense omega #ifdef _DEBUG /*double f_omega_diag_min_abs_coeff = omega_dense.diagonal().array().abs().minCoeff(); printf("debug: min diag coeff in omega: %g\n", f_omega_diag_min_abs_coeff);*/ #endif // _DEBUG //omega_dense.bottomRightCorner(omega_dense.rows() - omega_slim.n_BlockColumn_Column_Num(0), // omega_dense.cols() - omega_slim.n_BlockColumn_Column_Num(0)).diagonal().array() += .1; // slightly lift the diagonal (except the first vertex), like it would happen in damped least squares _TySample f_omega_time = f_omega_build_time; _TySample f_dense_margs_time = 0; _TySample f_update_basis_time = 0; _TySample f_update_time = 0; _TySample f_extend_time = 0; timer.Accum_DiffSample(f_omega_time); const size_t n_cur_state_size = lambda.n_Column_Num(); const size_t n_prev_state_size = r_prev_marginals.n_Column_Num(); const size_t n_prev_state_block_num = r_prev_marginals.n_BlockColumn_Num(); const size_t n_omega_elems = omega_slim.n_Column_Num(); Eigen::MatrixXd Tu_full(n_cur_state_size, n_omega_elems); // massif: 35,897,688B // do not allocate it smaller, will need these to update the new covs! 
_ASSERTE(n_packed_block_column_num <= INT_MAX); int _n_packed_block_column_num = int(n_packed_block_column_num); #pragma omp parallel for if(n_prev_state_block_num > 1000) for(int i = 0; i < _n_packed_block_column_num; ++ i) { // t_odo - could run in parallel, but usually needs like two to six columns (threads) size_t n_block_base_margs = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals size_t n_block_base_Tu = omega_slim.n_BlockColumn_Base(i); size_t n_block_cols = omega_slim.n_BlockColumn_Column_Num(i); // get dimensions of this block Eigen::Block<Eigen::MatrixXd> Tu_block = Tu_full.block(0, n_block_base_Tu, n_cur_state_size, n_block_cols); // g++ requires a temporary CMarginals::Calculate_SubblockMarginals_Fast_ColumnBand_FBS< typename CSystemType::_TyHessianMatrixBlockList>(Tu_block /*Tu_full.block(0, n_block_base_Tu, n_cur_state_size, n_block_cols)*/, r_R, n_block_base_margs, mord.p_Get_InverseOrdering(), mord.n_Ordering_Size(), mord.n_Ordering_Size()/*n_prev_state_block_num*/); // really calculate a block of dense marginals } Eigen::Block<Eigen::MatrixXd> Tu = Tu_full.topRows/*LeftCorner*/(n_prev_state_size/*, n_omega_elems*/); // assemble Tu Eigen::MatrixXd s(n_omega_elems, n_omega_elems); for(size_t i = 0; i < n_packed_block_column_num; ++ i) { size_t n_block_base_row_Tu = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals size_t n_block_base_row_s = omega_slim.n_BlockColumn_Base(i); size_t n_block_rows = omega_slim.n_BlockColumn_Column_Num(i); // is symmetric // get dimensions of this block if(n_block_base_row_Tu < n_prev_state_size) { s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems) = Tu.block(n_block_base_row_Tu, 0, n_block_rows, n_omega_elems); } else s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems).setZero(); // copy block from Tu to s } // cut out s (could be performed inside a sparse block matrix // multiplication, except now we don't have the data in 
a block matrix) timer.Accum_DiffSample(f_dense_margs_time); /*#ifdef _DEBUG Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> llt(omega_dense); if(llt.info() == Eigen::Success) printf("debug: was able to use the first LL^T Cholesky\n"); // does this ever happen? it does, when one is very certain, typically when the covariance associated with the edges is high #endif // _DEBUG*/ omega_dense.triangularView<Eigen::StrictlyLower>() = omega_dense.triangularView<Eigen::StrictlyUpper>().transpose(); // need both halves! (block matrix omega doesn't contain its lower triangular part) Eigen::MatrixXd V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - s * omega_dense; // calculate V #ifdef _DEBUG /*double f_V_diag_min_abs_coeff = V.diagonal().array().abs().minCoeff(); printf("debug: min diag coeff in S: %g (will invert it)\n", f_V_diag_min_abs_coeff); // in the video this is called S*/ #endif // _DEBUG // t_odo - do more manual inversion (with LU?) of V, // in case it is not invertible, refrain to use batch marginals Eigen::FullPivLU<Eigen::MatrixXd> luV(V); if(!luV.isInvertible()) return false; // that's a runtime err //Eigen::MatrixXd Bu = ((omega_dense * luV.inverse()) * Tu.transpose()); // t_odo - produce the symmetrical product; or double the memory and have e.g. right side // of the product mutliply Tu (that way at least the computation is saved, if not storage) typedef forward_allocated_pool<double, 0, 64> CAlloc; const size_t n_omega_stride = n_Align_Up_POT(n_omega_elems, size_t(8)); double *p_bu_buffer = (double*)CAlloc::aligned_alloc(n_omega_stride * n_prev_state_size * sizeof(double)); // massif: +73,886,208B/2 Eigen::Map<Eigen::MatrixXd, Eigen::Aligned, Eigen::OuterStride<> > Bu(p_bu_buffer, n_omega_elems, n_prev_state_size, Eigen::OuterStride<>(n_omega_stride)); // why did I do that as a map? to have stride? 
// t_odo - try to increase n_omega_elems to the next multiple of 8 #ifdef __MARGINALS_COMPACT_UPDATE // ommit TuT Bu.noalias() = (omega_dense * luV.inverse()) * Tu.transpose(); // a suspicious matrix product, this is probably where massif detects +35,893,584B +35,893,584B #else // __MARGINALS_COMPACT_UPDATE double *p_tut_buffer = (double*)CAlloc::aligned_alloc(n_omega_stride * n_prev_state_size * sizeof(double)); // massif: +73,886,208B/2 Eigen::Map<Eigen::MatrixXd, Eigen::Aligned, Eigen::OuterStride<> > TuT(p_tut_buffer, n_omega_elems, n_prev_state_size, Eigen::OuterStride<>(n_omega_stride)); TuT = Tu.transpose(); // keep this as well, for a better memory locality Bu.noalias() = (omega_dense * luV.inverse()) * TuT; // a suspicious matrix product, this is probably where massif detects +35,893,584B +35,893,584B #endif // __MARGINALS_COMPACT_UPDATE // t_odo - produce the symmetrical product; or double the memory and have e.g. right side // of the product mutliply Tu (that way at least the computation is saved, if not storage) timer.Accum_DiffSample(f_update_basis_time); _ASSERTE(//n_matrix_part == mpart_Nothing || // handled above n_matrix_part == mpart_FullMatrix || // does not combine with anything (is addition idempotent) n_matrix_part == mpart_LastBlock || // does not combine with mpart_LastColumn or mpart_Diagonal n_matrix_part == mpart_LastColumn || n_matrix_part == mpart_Diagonal || n_matrix_part == (mpart_LastColumn | mpart_Diagonal)); // there are only a few choices of what one can compose out of mpart_* const bool b_diag_update = //n_matrix_part == mpart_FullMatrix || // no, need to update everything else n_matrix_part == mpart_LastBlock || // yes, but only in the last column //n_matrix_part == mpart_LastColumn || // no n_matrix_part == mpart_Diagonal || // yes n_matrix_part == (mpart_LastColumn | mpart_Diagonal); // yes const bool b_diag_only_last = n_matrix_part == mpart_LastBlock; _ASSERTE(!b_diag_only_last || b_diag_update); // only set if b_diag_update 
also set // diagonal-only update const bool b_full_update = n_matrix_part == mpart_FullMatrix || // yes //n_matrix_part == mpart_LastBlock || // no n_matrix_part == mpart_LastColumn || // yes, but only in the last column //n_matrix_part == mpart_Diagonal || // no n_matrix_part == (mpart_LastColumn | mpart_Diagonal); // yes, but only in the last column const bool b_full_only_last = (n_matrix_part & mpart_LastColumn) == mpart_LastColumn; _ASSERTE(!b_full_only_last || b_full_update); // only set if b_diag_update also set // full update const size_t n_new_vertex_num = lambda.n_BlockColumn_Num() - n_prev_state_block_num; const bool b_last_col_is_part_of_interior = !n_new_vertex_num; // the last column is a part of the inside of the old marginals _ASSERTE(b_full_update || b_diag_update); // at least one is set const bool b_update_interior = (b_diag_update && !b_diag_only_last) || (b_full_update && !b_full_only_last) || b_last_col_is_part_of_interior; // do we need to update the interior of the matrix? 
_ASSERTE(!b_full_only_last || !b_diag_only_last); // at most one is set _ASSERTE(!(b_diag_only_last && b_full_update)); // never at the same time; if b_diag_only_last is set, full can't be set bool b_purged = false; size_t n_max_size = 0; if(b_update_interior) { const size_t n_news_stat_block_num = lambda.n_BlockColumn_Num(); _ASSERTE(n_prev_state_block_num <= INT_MAX); int _n_prev_state_block_num = int(n_prev_state_block_num); int n_first = 0; if((b_full_update && b_full_only_last && !b_diag_update) || (b_diag_update && b_diag_only_last && !b_full_update)) // note thet both b_full_only_last and b_diag_only_last can't be set at the same time n_first = _n_prev_state_block_num - 1; // start at the last column // decide the interior update range CUberBlockMatrix new_margs_tmp; bool b_update_to_temporary = ((n_matrix_part & mpart_LastColumn) == mpart_LastColumn && !b_last_col_is_part_of_interior) && n_matrix_part != mpart_FullMatrix/*!b_full_update*/; // b_full_update means something else // decide whether to update to a temporary matrix if(b_update_to_temporary) { if(r_prev_marginals.n_Storage_Size() / 2 < (n_max_size = std::max(size_t(1048576), r_R.n_Storage_Size()))) // n_Storage_Size() is in elements, not bytes // could align up to whole blocks, like this it sometimes deletes right after allocation b_update_to_temporary = false; // don't want to use it, the relinearization will take care of throwing away the dense parts else { //printf("purge\n"); b_purged = true; } } /*else printf("cannot purge\n");*/ // only allow updating to a temporary if the marginals matrix takes much more space than the factor bool b_run_in_parallel = true; if(b_update_to_temporary) { // note that this is probably slightly more costy as it is doing M' = M + U istead of M += U //if(!n_first) r_prev_marginals.CopyLayoutTo(new_margs_tmp); /*else new_margs_tmp.ExtendTo(r_prev_marginals.n_Row_Num(), r_prev_marginals.n_Column_Num());*/ // don't, the marginals are indexed by vertex id's (well not 
outside of this function or at least yet), // need to avoid having empty structure there // (but it's a bummer as it would be the perfect use case for the empty structure inside a matrix) // it could be handled here using a branch, but it would get impossible to handle updates // from that matrix, everyone would need to use row-col lookups // actually with some care it would not be difficult in the new vertex loop // and this loop could mostly use the existing structure of the columns (which will lead to // unbalanced OpenMP processing if only the last column is needed, as the rest of the columns // will be empty) // but, it would make the code (even more) messy and our use cases don't involve last // block / last column only (yet?) // todo if(1) { for(int i = n_first; i < _n_prev_state_block_num; ++ i) { const size_t n_cols = new_margs_tmp.n_BlockColumn_Column_Num(i); new_margs_tmp.t_GetBlock_Log(i, i, n_cols, n_cols, true, false); // massif: 8,388,608B } // preallocate structure } else b_run_in_parallel = false; // in case the structure is not allocated, would cause conflicts } // allocate structure in the temporary matrix (to save time when looking blocks up) #pragma omp parallel for if(b_run_in_parallel && n_prev_state_block_num - n_first > 1000) for(int i = n_first; i < _n_prev_state_block_num; ++ i) { // t_odo - this needs to run in parallel const size_t n_cols = r_prev_marginals.n_BlockColumn_Column_Num(i); const size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i); const bool b_last_col = size_t(i) + 1 == n_news_stat_block_num; // new state, not the old one! 
if(!b_full_update || (b_full_only_last && !b_last_col)) { // if not full, then diagonal (must be doing something) _ASSERTE(!b_diag_only_last || b_last_col); // if we want to do only the last column, make sure we don't waste time on other columns #if 0 fbs_ut::CWrap2<CMarginalsUpdate_FBSKernel::TInnerLoop, fbs_ut::CCTSize2D<n_omega_elems, -1> >::template In_ColumnWidth_DecisionTree<_TyHessianMatrixBlockList>(int(n_cols), CMarginalsUpdate_FBSKernel::TInnerContext( CMarginalsUpdate_FBSKernel::TMiddleContext(r_prev_marginals, i, n_cur_state_size, n_prev_state_size, p_tut_buffer/*Tu_full.data()*/, p_bu_buffer/*Bu.data()*/), i, n_col_base, n_col_base)); #endif // 0 // n_omega_elems is not constant, would have to rewrite this a bit (it wouldn't be completely FBS) #ifdef _DEBUG if(!b_update_to_temporary) { for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) { if(r_prev_marginals.n_Block_Row(i, j) == i) continue; // not the diag block r_prev_marginals.t_Block_AtColumn(i, j).setZero(); } } #endif // _DEBUG // zero out other blocks so that it is obvious which parts are up to date (debug) if(b_update_to_temporary) { #ifdef __MARGINALS_COMPACT_UPDATE new_margs_tmp.t_GetBlock_Log(i, i, n_cols, n_cols, true, false) = r_prev_marginals.t_GetBlock_Log(i, i) - Tu.middleRows(n_col_base, n_cols).lazyProduct(Bu.middleCols(n_col_base, n_cols)); #else // __MARGINALS_COMPACT_UPDATE new_margs_tmp.t_GetBlock_Log(i, i, n_cols, n_cols, true, false) = r_prev_marginals.t_GetBlock_Log(i, i) - TuT.middleCols(n_col_base, n_cols).transpose().lazyProduct(Bu.middleCols(n_col_base, n_cols)); #endif // __MARGINALS_COMPACT_UPDATE } else { #ifdef __MARGINALS_COMPACT_UPDATE r_prev_marginals.t_GetBlock_Log(i, i) -= Tu.middleRows(n_col_base, n_cols).lazyProduct(Bu.middleCols(n_col_base, n_cols)); #else // __MARGINALS_COMPACT_UPDATE r_prev_marginals.t_GetBlock_Log(i, i) -= TuT.middleCols(n_col_base, n_cols).transpose().lazyProduct(Bu.middleCols(n_col_base, n_cols)); #endif // 
__MARGINALS_COMPACT_UPDATE } // maybe it would be faster using Tu_full? it makes no difference here. // t_odo - could FBS these products*/ } else { // update all the existing blocks in the current column _ASSERTE(!b_full_only_last || b_last_col); // if we want to do only the last column, make sure we don't waste time on other columns #if 0 typedef typename MakeTypelist(fbs_ut::CCTSize<n_omega_elems>, _TyHessianMatrixBlockList) _TySecondaryContext; fbs_ut::CWrap2<CMarginalsUpdate_FBSKernel::TMiddleLoop, _TySecondaryContext>::template In_ColumnWidth_DecisionTree<_TyHessianMatrixBlockList>(int(n_cols), CMarginalsUpdate_FBSKernel::TMiddleContext(r_prev_marginals, i, n_cur_state_size, n_prev_state_size, p_tut_buffer/*Tu_full.data()*/, p_bu_buffer/*Bu.data()*/)); #endif // 0 // n_omega_elems is not constant, would have to rewrite this a bit (it wouldn't be completely FBS) for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) { size_t n_row = r_prev_marginals.n_Block_Row(i, j); size_t n_row_base = r_prev_marginals.n_BlockColumn_Base(n_row); size_t n_rows = r_prev_marginals.n_BlockColumn_Column_Num(n_row); // is symmetric _ASSERTE(!b_update_to_temporary); // would copy the whole matrix anyway /*if(b_update_to_temporary) { #ifdef __MARGINALS_COMPACT_UPDATE new_margs_tmp.t_GetBlock_Log(n_row, i, n_rows, n_cols, true, false) = r_prev_marginals.t_Block_AtColumn(i, j) - Tu.middleRows(n_row_base, n_rows).lazyProduct(Bu.middleCols(n_col_base, n_cols)); #else // __MARGINALS_COMPACT_UPDATE new_margs_tmp.t_GetBlock_Log(n_row, i, n_rows, n_cols, true, false) = r_prev_marginals.t_Block_AtColumn(i, j) - TuT.middleCols(n_row_base, n_rows).transpose().lazyProduct(Bu.middleCols(n_col_base, n_cols)); #endif // __MARGINALS_COMPACT_UPDATE } else*/ { #ifdef __MARGINALS_COMPACT_UPDATE r_prev_marginals.t_Block_AtColumn(i, j) -= Tu.middleRows(n_row_base, n_rows).lazyProduct(Bu.middleCols(n_col_base, n_cols)); #else // __MARGINALS_COMPACT_UPDATE 
r_prev_marginals.t_Block_AtColumn(i, j) -= TuT.middleCols(n_row_base, n_rows).transpose().lazyProduct(Bu.middleCols(n_col_base, n_cols)); #endif // __MARGINALS_COMPACT_UPDATE //r_prev_marginals.t_Block_AtColumn(i, j) -= // Tu_full.block(n_row_base, 0, n_rows, n_omega_elems).lazyProduct( // Bu.block(0, n_col_base, n_omega_elems, n_cols)); // maybe it would be faster using Tu_full? it makes no difference here. // could FBS these products } } } } if(b_update_to_temporary) new_margs_tmp.Swap(r_prev_marginals); // t_odo: note that if mpart_LastColumn is selected, the matrix actually fills up to completely // dense if that is the case, we need to rewrite the loop to actually add to a different matrix // and discard the original one! // also note that leaving out-of-date blocks in the marginals will add on the complexity of deciding // whether or not to compute the on-demand updates (will have to see whether a block would have been // updated in the past, and if not, compute it - this will make the on-demand recursive marginals // calculation from R more complex as that one needs to know what blocks to use in the recursive // formula) } else { // the interior not updated, may as well cleanup r_prev_marginals.SetZero(); // not Clear(), that would delete the layout also } // update (actually "minus downdate") the existing blocks; in order to check, // however, need to update all the blocks that are already there _ASSERTE(r_prev_marginals.n_BlockColumn_Num() + n_new_vertex_num == lambda.n_BlockColumn_Num()); // it is not extended yet timer.Accum_DiffSample(f_update_time); if(!b_last_col_is_part_of_interior) { size_t n_first = 0; if((b_full_update && b_full_only_last && !b_diag_update) || (b_diag_update && b_diag_only_last && !b_full_update)) // note thet both b_full_only_last and b_diag_only_last can't be set at the same time n_first = n_new_vertex_num - 1; // start at the last column // decide the interior update range for(size_t i = n_first; i < n_new_vertex_num; ++ i) { 
// can't run in parallel, changes the matrix layout (and is typically quite small) size_t n_vertex = n_prev_state_block_num + i; size_t n_dim = lambda.n_BlockColumn_Column_Num(n_vertex); size_t n_block_col_in_Tu = std::find(required_column_list.begin(), required_column_list.end(), n_vertex) - required_column_list.begin(); // lower bound? size_t n_col_in_Tu = omega_slim.n_BlockColumn_Base(n_block_col_in_Tu); size_t n_row_in_Tu = lambda.n_BlockColumn_Base(n_vertex); // see which vertex it is in Tu /*r_prev_marginals.ExtendTo(r_prev_marginals.n_Row_Num() + n_dim, r_prev_marginals.n_Column_Num() + n_dim); // debug - just enlarge, no new blocks*/ const bool b_last_col = i + 1 == n_new_vertex_num; if(!b_full_update || (b_full_only_last && !b_last_col)) { // if not full, then diagonal (must be doing something) _ASSERTE(!b_diag_only_last || b_last_col); // if we want to do only the last column, make sure we don't waste time on other columns // fill in the diagonal block r_prev_marginals.t_GetBlock_Log(n_vertex, n_vertex, n_dim, n_dim, true, false) = Tu_full.block(n_row_in_Tu, n_col_in_Tu, n_dim, n_dim); // amazingly correct // need Tu_full, the blocks are not present in just Tu } else { // the last column (on the last column) _ASSERTE(!b_full_only_last || b_last_col); // if we want to do only the last column, make sure we don't waste time on other columns // fill in the whole column for(size_t j = 0, m = lambda.n_BlockColumn_Num(); j < m; ++ j) { // note that the other blocks presumably calculated from bottom to top size_t n_rows = lambda.n_BlockColumn_Column_Num(j); // should be row, but lambda is symmetric and this is in cache size_t n_row_in_Tu = lambda.n_BlockColumn_Base(j); r_prev_marginals.t_GetBlock_Log(j, n_vertex, n_rows, n_dim, true, false) = // massif: 33,554,432B Tu_full.block(n_row_in_Tu, n_col_in_Tu, n_rows, n_dim); // massif: 13,107,200B } // for each block in the column } } } // put there the new blocks // note that instead of updating what's between 
Tu_full, we could replace it for better // precision (or maybe instability, when mixed with the updated parts - that depends) CAlloc::aligned_free(p_bu_buffer); #ifndef __MARGINALS_COMPACT_UPDATE CAlloc::aligned_free(p_tut_buffer); #endif // !__MARGINALS_COMPACT_UPDATE // should free in catch(bad_alloc) as well, will leave memory allocated if something throws timer.Accum_DiffSample(f_extend_time); _TySample f_total_time = 0; timer.Accum_CumTime_LastSample(f_total_time); /*if(b_purged && r_prev_marginals.n_Storage_Size() / 2 > n_max_size) printf("failed purge, the matrix still takes more\n");*/ if(b_enable_timer) { printf("marginals update took %.5f msec\n", f_total_time * 1000.0); printf("\tomega: %.5f msec\n", f_omega_time * 1000.0); printf("\tTu, s: %.5f msec\n", f_dense_margs_time * 1000.0); printf("\tbasis: %.5f msec\n", f_update_basis_time * 1000.0); printf("\t upd: %.5f msec\n", f_update_time * 1000.0); printf("\t ext: %.5f msec\n", f_extend_time * 1000.0); } return true; } /** * @brief incrementally updates sparse blocky marginals on diagonal or everywhere * * @tparam b_enable_timer is timing enable flag (if disabled, incurs no runtime costs) * @tparam CSystemType is optimized system type (for optimized FBS calls) * * @param[in] system is reference to the optimized system * @param[in,out] r_prev_marginals is reference to the marginals to be updated (must be in natural order) * @param[in] r_lambda_in_natural_order is reference to the lambda matrix (must be in natural order) * @param[in] r_R is reference to ordered Cholesky factorization of r_lambda_in_natural_order * @param[in] mord is reference to the ordering, used in the Cholesky factorization of r_R * @param[in] n_edges_in_prev_marginals is number of edges in r_prev_marginals * @param[in] b_update_diag_only is diagonal update flag (if set, only the diagonal is updated) * * @return Returns true on success, false on numerical issue * (in that case, r_prev_marginals is not modified). 
 *
 * @note This function throws std::bad_alloc.
 */
// * @note This is only available if __SE_TYPES_SUPPORT_L_SOLVERS is defined (the Omega matrix is used).
template <bool b_enable_timer, class CSystemType>
static bool Update_BlockDiagonalMarginals(const CSystemType &system,
	CUberBlockMatrix &r_prev_marginals, const CUberBlockMatrix &r_lambda_in_natural_order,
	const CUberBlockMatrix &r_R, const CMatrixOrdering &mord,
	size_t n_edges_in_prev_marginals, bool b_update_diag_only = false) // throw(std::bad_alloc)
{
	typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSampler _TyTimerSampler;
	typedef typename CTimerSamplerTraits<b_enable_timer>::_TyTimerSamplerRef _TyTimerSamplerRef;
	typedef typename _TyTimerSampler::_TySample _TySample;
	// choose a timer based on whether it is enabled or not

	CTimer t;
	_TyTimerSampler timer(t);

	const CUberBlockMatrix &lambda = r_lambda_in_natural_order; // rename

	// find the lowest vertex id touched by the edges added since the marginals were last computed
	const size_t n_prev_edge_num = n_edges_in_prev_marginals;
	size_t n_edge_num = system.r_Edge_Pool().n_Size();
	size_t n_order_min = r_prev_marginals.n_BlockColumn_Num();
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) {
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/);
		// note that these are ids, but these equal order at the moment
	}
	size_t n_elem_order_min = r_prev_marginals.n_BlockColumn_Base(n_order_min);
	// no ordering here, that is correct (omega is not ordered either)

	// accumulate omega (the information added by the new edges) and collect the
	// ids of the block columns those edges touch
	CUberBlockMatrix omega; // todo - this might be up to the linear solver, in LM omega will be different
	std::vector<size_t> required_column_list;
	required_column_list.reserve(2 * (n_edge_num - n_prev_edge_num)); // a guess; might be edges with more or less than 2 verts
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) { // not parallel! (would have conflicts)
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		r_edge.Calculate_Omega(omega, n_elem_order_min);
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			required_column_list.push_back(r_edge.n_Vertex_Id(j));
	}
	// get omega and vertex id's

	std::sort(required_column_list.begin(), required_column_list.end());
	required_column_list.erase(std::unique(required_column_list.begin(),
		required_column_list.end()), required_column_list.end());
	// finalize the required column list (could use std::set, but that feels like overkill)

	// build a permutation that moves the "interesting" (nonzero) columns of omega to the front
	std::vector<size_t> pack_order;
	pack_order.reserve(omega.n_BlockColumn_Num());
	for(size_t i = 0, n = required_column_list.size(); i < n; ++ i) {
		size_t n_col = required_column_list[i];
		size_t n_base = lambda.n_BlockColumn_Base(n_col) - n_elem_order_min;
		size_t n_col_size;
		size_t n_col_omega = omega.n_Find_BlockColumn(n_base, n_col_size);
		_ASSERTE(n_col_size == lambda.n_BlockColumn_Column_Num(n_col));
		pack_order.push_back(n_col_omega);
	}
	// get numbers of "interesting" columns of omega (no ordering here either)

	const size_t n_packed_block_column_num = pack_order.size(); // remember this
	for(size_t i = 1; i < n_packed_block_column_num; ++ i) {
		size_t n_o0 = pack_order[i - 1];
		size_t n_o1 = pack_order[i];
		_ASSERTE(n_o1 > n_o0); // should be sorted
		for(size_t j = n_o0 + 1; j < n_o1; ++ j)
			pack_order.push_back(j);
		// append the rest of the cols at the end (only up to omega.n_BlockColumn_Num() / 2 of them)
	}
	_ASSERTE(pack_order.size() == omega.n_BlockColumn_Num());
	// finalize the order

	std::vector<size_t> inv_pack_order(pack_order.size());
	for(size_t i = 0, n = pack_order.size(); i < n; ++ i)
		inv_pack_order[pack_order[i]] = i;
	// inverse the order

	CUberBlockMatrix omega_slim;
	omega.Permute_UpperTriangular_To(omega_slim, &inv_pack_order[0],
		inv_pack_order.size(), true); // pack omega
	omega_slim.SliceTo(omega_slim, n_packed_block_column_num, n_packed_block_column_num, true);
	_ASSERTE(omega_slim.n_BlockColumn_Num() == n_packed_block_column_num);
	// slice off the empty columns

	Eigen::MatrixXd omega_dense;
	omega_slim.Convert_to_Dense(omega_dense);
	// get dense omega

	_TySample f_omega_time = 0;
	_TySample f_dense_margs_time = 0;
	_TySample f_update_basis_time = 0;
	_TySample f_update_time = 0;
	_TySample f_extend_time = 0;
	timer.Accum_DiffSample(f_omega_time);

	const size_t n_cur_state_size = lambda.n_Column_Num();
	const size_t n_prev_state_size = r_prev_marginals.n_Column_Num();
	const size_t n_prev_state_block_num = r_prev_marginals.n_BlockColumn_Num();
	const size_t n_omega_elems = omega_slim.n_Column_Num();

	// Tu holds the dense marginals column band corresponding to the touched columns
	Eigen::MatrixXd Tu_full(n_cur_state_size, n_omega_elems); // do not allocate it smaller, will need these to update the new covs!
	_ASSERTE(n_packed_block_column_num <= INT_MAX);
	int _n_packed_block_column_num = int(n_packed_block_column_num); // signed loop counter for OpenMP
	#pragma omp parallel for if(n_prev_state_block_num > 1000)
	for(int i = 0; i < _n_packed_block_column_num; ++ i) { // t_odo - could run in parallel, but usually needs like two to six columns (threads)
		size_t n_block_base_margs = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals
		size_t n_block_base_Tu = omega_slim.n_BlockColumn_Base(i);
		size_t n_block_cols = omega_slim.n_BlockColumn_Column_Num(i);
		// get dimensions of this block

		CMarginals::Calculate_SubblockMarginals_Fast_ColumnBand_FBS<
			typename CSystemType::_TyHessianMatrixBlockList>(
			Tu_full.block(0, n_block_base_Tu, n_cur_state_size, n_block_cols), r_R,
			n_block_base_margs, mord.p_Get_InverseOrdering(), mord.n_Ordering_Size(),
			mord.n_Ordering_Size()/*n_prev_state_block_num*/);
		// really calculate a block of dense marginals
	}
	Eigen::Block<Eigen::MatrixXd> Tu = Tu_full.topLeftCorner(n_prev_state_size, n_omega_elems);
	// assemble Tu

	// s is the square submatrix of Tu at the touched rows; rows past the previous
	// state size do not exist in the old marginals and are zeroed
	Eigen::MatrixXd s(n_omega_elems, n_omega_elems);
	for(size_t i = 0; i < n_packed_block_column_num; ++ i) {
		size_t n_block_base_row_Tu = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals
		size_t n_block_base_row_s = omega_slim.n_BlockColumn_Base(i);
		size_t n_block_rows = omega_slim.n_BlockColumn_Column_Num(i); // is symmetric
		// get dimensions of this block

		if(n_block_base_row_Tu < n_prev_state_size) {
			s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems) =
				Tu.block(n_block_base_row_Tu, 0, n_block_rows, n_omega_elems);
		} else
			s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems).setZero();
		// copy block from Tu to s
	}
	// cut out s (could be performed inside a sparse block matrix
	// multiplication, except now we don't have the data in a block matrix)

	timer.Accum_DiffSample(f_dense_margs_time);

#ifdef _DEBUG
	Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> llt(omega_dense);
	if(llt.info() == Eigen::Success)
		printf("debug: was able to use the first LL^T Cholesky\n"); // does this ever happen?
#endif // _DEBUG

	omega_dense.triangularView<Eigen::StrictlyLower>() =
		omega_dense.triangularView<Eigen::StrictlyUpper>().transpose();
	// need both halves! (block matrix omega doesn't contain its lower triangular part)

	Eigen::MatrixXd V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - s * omega_dense;
	// calculate V

	// t_odo - do more manual inversion (with LU?) of V,
	// in case it is not invertible, refrain to use batch marginals
	Eigen::FullPivLU<Eigen::MatrixXd> luV(V); // full-pivot LU so isInvertible() is reliable
	if(!luV.isInvertible())
		return false; // that's a runtime err

	Eigen::MatrixXd Bu = ((omega_dense * luV.inverse()) * Tu.transpose());
	// t_odo - produce the symmetrical product; or double the memory and have e.g. right side
	// of the product multiply Tu (that way at least the computation is saved, if not storage)

	timer.Accum_DiffSample(f_update_basis_time);

	// downdate the blocks already present in the marginals (diagonal only, or all of them)
	_ASSERTE(n_prev_state_block_num <= INT_MAX);
	int _n_prev_state_block_num = int(n_prev_state_block_num); // signed loop counter for OpenMP
	#pragma omp parallel for if(n_prev_state_block_num > 1000)
	for(int i = 0; i < _n_prev_state_block_num; ++ i) { // t_odo - this needs to run in parallel
		size_t n_col_base = r_prev_marginals.n_BlockColumn_Base(i);
		size_t n_cols = r_prev_marginals.n_BlockColumn_Column_Num(i);
		if(b_update_diag_only) {
			r_prev_marginals.t_GetBlock_Log(i, i) -=
				Tu_full.block(n_col_base, 0, n_cols, n_omega_elems).lazyProduct(
				Bu.block(0, n_col_base, n_omega_elems, n_cols));
			// maybe it would be faster using Tu_full? it makes no difference here.
			// could FBS these products
		} else {
			for(size_t j = 0, m = r_prev_marginals.n_BlockColumn_Block_Num(i); j < m; ++ j) {
				size_t n_row = r_prev_marginals.n_Block_Row(i, j);
				size_t n_row_base = r_prev_marginals.n_BlockColumn_Base(n_row);
				size_t n_rows = r_prev_marginals.n_BlockColumn_Column_Num(n_row); // is symmetric
				r_prev_marginals.t_Block_AtColumn(i, j) -=
					Tu_full.block(n_row_base, 0, n_rows, n_omega_elems).lazyProduct(
					Bu.block(0, n_col_base, n_omega_elems, n_cols));
				// maybe it would be faster using Tu_full? it makes no difference here.
				// could FBS these products
			}
		}
	}
	// update (actually "minus downdate") the existing blocks; in order to check,
	// however, need to update all the blocks that are already there

	timer.Accum_DiffSample(f_update_time);

	// append diagonal blocks for the vertices added since the previous marginals;
	// their values come straight from the freshly calculated Tu_full band
	size_t n_new_vertex_num = lambda.n_BlockColumn_Num() - n_prev_state_block_num;
	for(size_t i = 0; i < n_new_vertex_num; ++ i) { // can't run in parallel, changes the matrix layout
		size_t n_vertex = n_prev_state_block_num + i;
		size_t n_dim = lambda.n_BlockColumn_Column_Num(n_vertex);
		size_t n_block_col_in_Tu = std::find(required_column_list.begin(),
			required_column_list.end(), n_vertex) - required_column_list.begin();
		size_t n_col_in_Tu = omega_slim.n_BlockColumn_Base(n_block_col_in_Tu);
		size_t n_row_in_Tu = lambda.n_BlockColumn_Base(n_vertex);
		// see which vertex it is in Tu

		/*r_prev_marginals.ExtendTo(r_prev_marginals.n_Row_Num() + n_dim,
			r_prev_marginals.n_Column_Num() + n_dim); // debug - just enlarge, no new blocks*/

		r_prev_marginals.t_GetBlock_Log(n_vertex, n_vertex, n_dim, n_dim, true, false) =
			Tu_full.block(n_row_in_Tu, n_col_in_Tu, n_dim, n_dim); // amazingly correct
		// need Tu_full, the blocks are not present in just Tu
	}
	// put there the new blocks

	timer.Accum_DiffSample(f_extend_time);

	_TySample f_total_time = 0;
	timer.Accum_CumTime_LastSample(f_total_time);
	if(b_enable_timer) {
		printf("marginals update took %.5f msec\n", f_total_time * 1000.0);
		printf("\tomega: %.5f msec\n", f_omega_time * 1000.0);
		printf("\tTu, s: %.5f msec\n", f_dense_margs_time * 1000.0);
		printf("\tbasis: %.5f msec\n", f_update_basis_time * 1000.0);
		printf("\t upd: %.5f msec\n", f_update_time * 1000.0);
		printf("\t ext: %.5f msec\n", f_extend_time * 1000.0);
	}

	return true;
}

#endif // __SE_TYPES_SUPPORT_L_SOLVERS || 1

/**
 * @brief performs some timing benchmark of updating marginals on a SE(2) system
 *
 * @tparam CEdgeType is edge type to be added
 * @tparam CSystemType is optimized system type
 * @tparam CSolverType is solver type
 *
 * @param[in,out] system is reference to the
 * optimized system (gets extended by some edges / vertices)
 * @param[in,out] solver is reference to the nonlinear solver
 * @param[in] information is information matrix for the new edges
 *
 * @note This reuses some of CMarginals functions to calculate / update the marginals.
 */
template <class CEdgeType, class CSystemType, class CSolverType>
static void Test_ExperimentalIncrementalMarginals(CSystemType &system,
	CSolverType &solver, Eigen::Matrix3d information)
{
	//system.r_Add_Edge(CEdgePose2D(83, 101, Eigen::Vector3d(-1.97704, -0.00703316, -3.16668), information, system));
	//solver.Optimize();
	// want to have 101x101 system to begin with, to make S symmetric

	CUberBlockMatrix lambda_prev = solver.r_Lambda(); // get a *copy* of lambda
	size_t n_prev_edge_num = system.r_Edge_Pool().n_Size(); // remember how many edges there are

	// add a handful of hardcoded test edges (loop closures and odometry) to the system
	system.r_Add_Edge(CEdgeType(83, 101, Eigen::Vector3d(-1.97704, -0.00703316, -3.16668), information, system));
	system.r_Add_Edge(CEdgeType(100, 101, Eigen::Vector3d(1.02465, 0.0317282, 0.024041), information, system));
	//system.r_Add_Edge(CEdgeType(96, 101/*102*/, Eigen::Vector3d(0.007666, 0.00487275, -3.16637), information, system)); // loop without changing dims (not present in the original dataset)
	system.r_Add_Edge(CEdgeType(96, 102, Eigen::Vector3d(0.007666, 0.00487275, -3.16637), information, system));
	system.r_Add_Edge(CEdgeType(96, 102, Eigen::Vector3d(0.0105401, -0.00932236, -3.11433), information, system));
	system.r_Add_Edge(CEdgeType(101, 102, Eigen::Vector3d(0.99396, 0.0202571, -0.00240724), information, system));
	// add some more edges

	solver.Optimize(0); // must call that to calculate hessians in the new lambda
	const CUberBlockMatrix &lambda = solver.r_Lambda(); // get system matrix (without relinearizing!)

	/*{ lambda_prev.Save_MatrixMarket("lambda_prev.mtx", "lambda_prev.bla");
	lambda.Save_MatrixMarket("lambda_cur.mtx", "lambda_cur.bla"); }*/ // for Ela

	size_t n_edge_num = system.r_Edge_Pool().n_Size();
	size_t n_order_min = lambda.n_BlockColumn_Num();
	for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) {
		typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i];
		for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j)
			n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/);
		// note that these are ids, but these equal order at the moment
	}
	size_t n_elem_order_min = lambda.n_BlockColumn_Base(n_order_min);
	// no ordering here, that is correct (omega is not ordered either)
	// NOTE(review): n_elem_order_min does not appear to be used below in this
	// function — presumably a leftover from the non-test code path; confirm

	// factorize the current lambda under a fill-reducing ordering
	CUberBlockMatrix cur_R_ord;
	CMatrixOrdering mord;
	mord.p_BlockOrdering(lambda, true);
	{
		const size_t *p_order = mord.p_Get_InverseOrdering();
		CUberBlockMatrix lambda_perm;
		lambda.Permute_UpperTriangular_To(lambda_perm, p_order, mord.n_Ordering_Size(), true);
		cur_R_ord.CholeskyOf_FBS<typename CSystemType::_TyHessianMatrixBlockList>(lambda_perm);
	}
	// take Cholesky (unordered so it is easier to compute the margs)

	// and likewise factorize the previous lambda
	CUberBlockMatrix prev_R_ord;
	CMatrixOrdering mord_prev;
	mord_prev.p_BlockOrdering(lambda_prev, true);
	{
		const size_t *p_order = mord_prev.p_Get_InverseOrdering();
		CUberBlockMatrix prev_lambda_perm;
		lambda_prev.Permute_UpperTriangular_To(prev_lambda_perm, p_order, mord_prev.n_Ordering_Size(), true);
		prev_R_ord.CholeskyOf_FBS<typename CSystemType::_TyHessianMatrixBlockList>(prev_lambda_perm);
	}
	// also of previous to have the sparse margs

	// compute dense ground-truth marginals before and after the new edges
	Eigen::MatrixXd margs_prev, margs;
#if 0
	{
		CUberBlockMatrix R, R_prev;
		R.CholeskyOf(lambda);
		R_prev.CholeskyOf(lambda_prev);
		CMarginals::Calculate_DenseMarginals_Ref(margs_prev, R_prev);
		CMarginals::Calculate_DenseMarginals_Ref(margs, R);
	}
#else // 0
	CMarginals::Calculate_DenseMarginals_Fast(margs_prev, prev_R_ord,
		mord_prev.p_Get_InverseOrdering(), mord_prev.n_Ordering_Size());
	CMarginals::Calculate_DenseMarginals_Fast(margs, cur_R_ord,
		mord.p_Get_InverseOrdering(), mord.n_Ordering_Size());
	// this more numerically stable (also faster, but that is not an issue here)
	// also, it is readilly verified that the result is the same as if using
	// unordered Cholesky in the branch above
#endif // 0
	// calculate marginals before and after

	CTimer t;
	CTimerSampler timer(t);
	double f_recurrent_time = 0;
	double f_reorder_time = 0;
	double f_compare_time = 0;
	double f_update_time = 0;

	CUberBlockMatrix sparse_margs_prev_ordered;
	CMarginals::Calculate_DenseMarginals_Recurrent_FBS<typename CSystemType::
		_TyHessianMatrixBlockList>(sparse_margs_prev_ordered, prev_R_ord, mord, mpart_Diagonal);
	// NOTE(review): this passes mord (the *current* ordering) together with prev_R_ord
	// (the *previous* factor), while the unpermute below uses mord_prev — looks
	// inconsistent; confirm whether mord_prev was intended here

	timer.Accum_DiffSample(f_recurrent_time);

	CUberBlockMatrix sparse_margs_prev;
	sparse_margs_prev_ordered.Permute_UpperTriangular_To(sparse_margs_prev,
		mord_prev.p_Get_Ordering(), mord_prev.n_Ordering_Size(), false); // no share, but not much reason for it
	// calculate sparse marginals in the prev step and unorder them (to have the natural order)

	timer.Accum_DiffSample(f_reorder_time);

	double f_sparse_margs_error = f_IncompleteDifference(margs_prev, sparse_margs_prev);
	printf("debug: sparse marginals in prev step calculated with error: %g\n", f_sparse_margs_error);
	// see if the sparse marginals are indeed in the natural order

	CUberBlockMatrix sparse_margs_cur = sparse_margs_prev; // do not want to time the copy, it is for debug purposes only, otherwise no copy would be made
	timer.Accum_DiffSample(f_compare_time); // compare time is a dummy timer, really, it is not even displayed

#ifdef __SE_TYPES_SUPPORT_L_SOLVERS
	Update_BlockDiagonalMarginals<true>(system, sparse_margs_cur, lambda,
		cur_R_ord, mord, n_prev_edge_num);
	// update the marginals
#endif // __SE_TYPES_SUPPORT_L_SOLVERS

	timer.Accum_DiffSample(f_update_time);

	mord_prev.p_ExtendBlockOrdering_with_Identity(sparse_margs_cur.n_BlockColumn_Num());
	// make the ordering bigger, the new entries were added at the end

	//CUberBlockMatrix sparse_margs_cur;
	//sparse_margs_prev_ordered.Permute_UpperTriangular_To(sparse_margs_cur,
	//	mord_prev.p_Get_Ordering(), mord_prev.n_Ordering_Size(), false); // no share, but not much reason for it
	// note that mord_prev.p_Get_Ordering() now returns the extended ordering

	// the current marginals are now in "natural" ordering, no need to reperm
	// note that in case the user will be requesting a lot of updates, it would be better
	// to have the marginals in the same order as R (but that might be impossible after a couple
	// of steps, as R would be completely or partially reordered and calculating a new R with
	// extended identity ordering would most likely be quite expensive)
	// therefore, if not benchmarking, updating just the diagonal elements should be enough
	// (the last block column is calculated fully anyway, unless it was a loop closure, not
	// extending the size of the matrix)
	// but anyway, if calculating the marginals on demand, the diagonal can be always reused,
	// potentially saving a great portion of computation

	double f_sparse_margs_update_error = f_IncompleteDifference(margs, sparse_margs_cur);
	printf("debug: sparse marginals in cur step updated with error: %g\n", f_sparse_margs_update_error);
	// see if the sparse marginals are indeed in the natural order

	double f_time_total = -f_compare_time;
	timer.Accum_CumTime_LastSample(f_time_total);
	printf("marginals took: %.5f msec\n", f_time_total * 1000);
	printf("\trecurrent: %.5f msec (calculated " PRIsize " nnz)\n",
		f_recurrent_time * 1000, sparse_margs_prev.n_NonZero_Num());
	printf("\t reorder: %.5f msec\n", f_reorder_time * 1000);
	printf("\t update: %.5f msec (calculated " PRIsize " nnz)\n",
		f_update_time * 1000, sparse_margs_cur.n_NonZero_Num());
	// print stats
}

/**
 * @brief performs some timing and numerical analysis benchmark of updating marginals on a SE(2) system
 *
 * @tparam CEdgeType is edge type to be added
 * @tparam CSystemType is optimized system type
 * @tparam CSolverType is
solver type * * @param[in,out] system is reference to the optimized system (gets extended by some edges / vertices) * @param[in,out] solver is reference to the nonlinear solver * @param[in] information is information matrix for the new edges * * @note This reimplements most of the method to be able to access the internal state of the algorithm. */ template <class CEdgeType, class CSystemType, class CSolverType> static void Test_ExperimentalIncrementalMarginals2(CSystemType &system, CSolverType &solver, Eigen::Matrix3d information) { //system.r_Add_Edge(CEdgePose2D(83, 101, Eigen::Vector3d(-1.97704, -0.00703316, -3.16668), information, system)); //solver.Optimize(); // want to have 101x101 system to begin with, to make S symmetric CUberBlockMatrix lambda_prev = solver.r_Lambda(); // get a *copy* of lambda size_t n_prev_edge_num = system.r_Edge_Pool().n_Size(); // remember how many edges there are system.r_Add_Edge(CEdgeType(83, 101, Eigen::Vector3d(-1.97704, -0.00703316, -3.16668), information, system)); system.r_Add_Edge(CEdgeType(100, 101, Eigen::Vector3d(1.02465, 0.0317282, 0.024041), information, system)); //system.r_Add_Edge(CEdgeType(96, 101/*102*/, Eigen::Vector3d(0.007666, 0.00487275, -3.16637), information, system)); // loop without changing dims (not present in the original dataset) system.r_Add_Edge(CEdgeType(96, 102, Eigen::Vector3d(0.007666, 0.00487275, -3.16637), information, system)); system.r_Add_Edge(CEdgeType(96, 102, Eigen::Vector3d(0.0105401, -0.00932236, -3.11433), information, system)); system.r_Add_Edge(CEdgeType(101, 102, Eigen::Vector3d(0.99396, 0.0202571, -0.00240724), information, system)); // add some more edges solver.Optimize(0); // must call that to calculate hessians in the new lambda const CUberBlockMatrix &lambda = solver.r_Lambda(); // get system matrix (without relinearizing!) 
/*{ lambda_prev.Save_MatrixMarket("lambda_prev.mtx", "lambda_prev.bla"); lambda.Save_MatrixMarket("lambda_cur.mtx", "lambda_cur.bla"); }*/ // for Ela size_t n_edge_num = system.r_Edge_Pool().n_Size(); size_t n_order_min = lambda.n_BlockColumn_Num(); for(size_t i = n_prev_edge_num; i < n_edge_num; ++ i) { typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i]; for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j) n_order_min = std::min(n_order_min, /*m_p_lambda_block_ordering[m_r_*/r_edge.n_Vertex_Id(j)/*]*/); // note that these are ids, but these equal order at the moment } size_t n_elem_order_min = lambda.n_BlockColumn_Base(n_order_min); // no ordering here, that is correct (omega is not ordered either) CUberBlockMatrix R, cur_R_ord; R.CholeskyOf(lambda); // CMatrixOrdering mord; mord.p_BlockOrdering(lambda, true); { const size_t *p_order = mord.p_Get_InverseOrdering(); CUberBlockMatrix lambda_perm; lambda.Permute_UpperTriangular_To(lambda_perm, p_order, mord.n_Ordering_Size(), true); cur_R_ord.CholeskyOf_FBS<typename CSystemType::_TyHessianMatrixBlockList>(lambda_perm); } // take Cholesky (unordered so it is easier to compute the margs) CUberBlockMatrix R_prev; R_prev.CholeskyOf(lambda_prev); Eigen::MatrixXd margs_prev, margs; CMarginals::Calculate_DenseMarginals_Ref(margs_prev, R_prev); CMarginals::Calculate_DenseMarginals_Ref(margs, R); // calculate marginals before and after CDebug::Print_DenseMatrix_Dimensions(stdout, margs_prev, "margs_prev: "); CDebug::Print_DenseMatrix_Dimensions(stdout, margs, "margs: "); // --------- the badass inc method begins --------- CTimer t; CTimerSampler timer(t); double f_omega_time = 0; double f_dense_margs_time = 0; double f_update_basis_time = 0; double f_update_time = 0; CUberBlockMatrix omega; std::vector<size_t> required_column_list; required_column_list.reserve(2 * (n_edge_num - n_prev_edge_num)); // a guess; might be edges with more or less than 2 verts for(size_t i = n_prev_edge_num; i < n_edge_num; 
++ i) { // not parallel! (wouls have conflicts) typename CSystemType::_TyConstEdgeRef r_edge = system.r_Edge_Pool()[i]; r_edge.Calculate_Omega(omega, n_elem_order_min); for(size_t j = 0, m = r_edge.n_Vertex_Num(); j < m; ++ j) required_column_list.push_back(r_edge.n_Vertex_Id(j)); } // get omega and vertex id's /*{ CUberBlockMatrix lam_diff = lambda_prev; lam_diff.ExtendTo(lambda.n_Row_Num(), lambda.n_Column_Num()); // otherwise add might fail because of different dims lambda.AddTo(lam_diff, -1); double f_err = omega.f_Norm() - lam_diff.f_Norm(); printf("debug: norm of omega is %g\n", omega.f_Norm()); printf("debug: norm of difference between lambdas is %g\n", lam_diff.f_Norm()); printf("debug: difference between lambdas and calculated omega is %g\n", f_err); }*/ // debug std::sort(required_column_list.begin(), required_column_list.end()); required_column_list.erase(std::unique(required_column_list.begin(), required_column_list.end()), required_column_list.end()); // finalize the required column list (could use std::set, but that feels like overkill) { std::vector<size_t> pack_order; pack_order.reserve(omega.n_BlockColumn_Num()); for(size_t i = 0, n = required_column_list.size(); i < n; ++ i) { size_t n_col = required_column_list[i]; size_t n_base = lambda.n_BlockColumn_Base(n_col) - n_elem_order_min; size_t n_col_size; size_t n_col_omega = omega.n_Find_BlockColumn(n_base, n_col_size); _ASSERTE(n_col_size == lambda.n_BlockColumn_Column_Num(n_col)); pack_order.push_back(n_col_omega); } // get numbers of "interesting" columns of omega (no ordering here either) const size_t n_packed_block_column_num = pack_order.size(); // remember this for(size_t i = 1; i < n_packed_block_column_num; ++ i) { size_t n_o0 = pack_order[i - 1]; size_t n_o1 = pack_order[i]; _ASSERTE(n_o1 > n_o0); // should be sorted for(size_t j = n_o0 + 1; j < n_o1; ++ j) pack_order.push_back(j); // append the rest of the cols at the end (only up to omega.n_BlockColumn_Num() / 2 of them) } 
_ASSERTE(pack_order.size() == omega.n_BlockColumn_Num()); // finalize the order std::vector<size_t> inv_pack_order(pack_order.size()); for(size_t i = 0, n = pack_order.size(); i < n; ++ i) inv_pack_order[pack_order[i]] = i; // inverse the order CUberBlockMatrix omega_slim; omega.Permute_UpperTriangular_To(omega_slim, &inv_pack_order[0], inv_pack_order.size(), true); // pack omega omega_slim.SliceTo(omega_slim, n_packed_block_column_num, n_packed_block_column_num, true); _ASSERTE(omega_slim.n_BlockColumn_Num() == n_packed_block_column_num); // slice off the empty columns Eigen::MatrixXd omega_dense; omega_slim.Convert_to_Dense(omega_dense); // get dense omega timer.Accum_DiffSample(f_omega_time); #ifdef _DEBUG //printf("debug: difference of norm(omega_dense) - norm(omega) = %g\n", // fabs(omega_slim.f_Norm() - omega.f_Norm())); omega.Rasterize("incmargs2_01_omega.tga"); omega_slim.Rasterize("incmargs2_02_omega-reorder.tga"); omega_slim.Rasterize("incmargs2_03_omega-dense-slim.tga"); CDebug::Print_DenseMatrix_Dimensions(stdout, omega_dense, "omega_dense: "); // debug #endif // _DEBUG _ASSERTE(margs_prev.rows() <= margs.rows()); const size_t n_prev_state_size = lambda_prev.n_Column_Num(); const size_t n_prev_state_block_num = lambda_prev.n_BlockColumn_Num(); const size_t n_omega_elems = omega_slim.n_Column_Num(); #if 0 Eigen::MatrixXd Tu_ref(n_prev_state_size, n_omega_elems); #endif // 0 Eigen::MatrixXd Tu(n_prev_state_size/*lambda.n_Row_Num()*/, n_omega_elems); // allocate it smaller to begin with (saves expensive reshuffling in conservativeResize() below) //Tu.setConstant(123456); // in case we accidentally left some values uninitialized for(size_t i = 0/*, n_rows = Tu.rows()*/; i < n_packed_block_column_num; ++ i) { // could run in parallel, but usually needs like two to six columns (threads) size_t n_block_base_margs = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals /*size_t n_column_orig = required_column_list[i]; const 
size_t *p_order = mord.p_Get_InverseOrdering(); size_t n_column_ordered = p_order[n_column_orig]; size_t n_block_base_margs_R = cur_R_ord.n_BlockColumn_Base(n_column_ordered); _ASSERTE(n_block_base_margs == n_block_base_margs_R);*/ // does not work; cur_R_ord.n_BlockColumn_Base(x) == lambda.n_BlockColumn_Base(x), but order[x] != x, and this is only if there is a single block size: need the labda :( size_t n_block_base_Tu = omega_slim.n_BlockColumn_Base(i); size_t n_block_cols = omega_slim.n_BlockColumn_Column_Num(i); // get dimensions of this block #if 0 //if(n_block_base_margs < size_t(margs_prev.rows())) { // breaks it Tu_ref.block(0, n_block_base_Tu, /*n_rows*/n_prev_state_size, n_block_cols) = margs.block(0, n_block_base_margs, /*n_rows*/n_prev_state_size, n_block_cols); /*} else { Tu_ref.block(0, n_block_base_Tu, n_rows, n_block_cols).setZero(); // !! // t_odo - if this is really the case, can make it smaller; all the zeroes // would be concentrated on the right (will save ample computation later // on when calculating the aditive update as a product of Tu) }*/ // calculate a block of dense marginals by whatever metod (in current time frame; here we just copy it from a full matrix) #endif // 0 CMarginals::Calculate_SubblockMarginals_Fast_ColumnBand_FBS<typename CSystemType::_TyHessianMatrixBlockList>( Tu.block(0, n_block_base_Tu, /*n_rows*/n_prev_state_size, n_block_cols), cur_R_ord, n_block_base_margs, mord.p_Get_InverseOrdering(), mord.n_Ordering_Size(), lambda_prev.n_BlockColumn_Num()); // really calculate a block of dense marginals } // assemble Tu #if 0 printf("debug: error of inplace marginals: %g\n", (Tu_ref - Tu).norm()); #endif // 0 //if(margs_prev.rows() < margs.rows()) // Tu.bottomLeftCorner(Tu.rows() - margs_prev.rows(), Tu.cols()).setZero(); //if(margs_prev.rows() < margs.rows()) // Tu.conservativeResize(margs_prev.rows(), Tu.cols()); // saves some computation // zero out the bottom overhanging area in case the dimensions of the // covariance 
matrix grew. todo - what about the right overhanging area? Eigen::MatrixXd s(n_omega_elems, n_omega_elems); //s.setConstant(123456); // in case we accidentally left some values uninitialized for(size_t i = 0; i < n_packed_block_column_num; ++ i) { size_t n_block_base_row_Tu = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals size_t n_block_base_row_s = omega_slim.n_BlockColumn_Base(i); size_t n_block_rows = omega_slim.n_BlockColumn_Column_Num(i); // is symmetric // get dimensions of this block if(n_block_base_row_Tu < n_prev_state_size) { s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems) = Tu.block(n_block_base_row_Tu, 0, n_block_rows, n_omega_elems); } else s.block(n_block_base_row_s, 0, n_block_rows, n_omega_elems).setZero(); // copy block from Tu to s } // cut out s (could be performed inside a sparse block matrix // multiplication, except now we don't have the data in a block matrix) /*if(margs_prev.rows() < margs.rows()) { Tu.bottomLeftCorner(Tu.rows() - margs_prev.rows(), Tu.cols()).setZero(); for(size_t i = 0, n_rows = Tu.rows(); i < n_packed_block_column_num; ++ i) { size_t n_block_base_margs = lambda.n_BlockColumn_Base(required_column_list[i]); // position in the (dense) marginals size_t n_block_base_Tu = omega_slim.n_BlockColumn_Base(i); size_t n_block_cols = omega_slim.n_BlockColumn_Column_Num(i); // get dimensions of this block if(n_block_base_margs >= size_t(margs_prev.rows())) { Tu.block(0, n_block_base_Tu, n_rows, n_block_cols).setZero(); // !! 
// t_odo - if this is really the case, can make it smaller; all the zeroes // would be concentrated on the right (will save ample computation later // on when calculating the aditive update as a product of Tu) } // calculate a block of dense marginals by whatever metod (in current time frame; here we just copy it from a full matrix) } }*/ // breaks it timer.Accum_DiffSample(f_dense_margs_time); #ifdef _DEBUG CDebug::Print_DenseMatrix_Dimensions(stdout, Tu, "Tu: "); CDebug::Print_DenseMatrix_Dimensions(stdout, s, "s: "); #endif // _DEBUG //#define __INCREMENTAL_MARGINALS_USE_LDLT //#define __INCREMENTAL_MARGINALS_LDLT_USE_DOUBLE_LDLT // will not work. ever. (well, to be fair, it will work if the size of the system did not change and if omega is positive semidefinite, probably not worth pursuing) //#define __INCREMENTAL_MARGINALS_TRY_LLT // waste of time? yes. #ifdef __INCREMENTAL_MARGINALS_USE_LDLT //omega_dense.triangularView<Eigen::StrictlyLower>() = // omega_dense.triangularView<Eigen::StrictlyUpper>().transpose(); // LDLT need that? 
no, it is Eigen::LDLT<Eigen::MatrixXd, Eigen::Upper>, it does not touch lower Eigen::LDLT<Eigen::MatrixXd, Eigen::Upper> ldlt(omega_dense); _ASSERTE(ldlt.info() == Eigen::Success); // calculate Cholesky Eigen::MatrixXd H = Eigen::MatrixXd(ldlt.matrixU()) * ldlt.transpositionsP().transpose(); // need to convert, matrixU() returns triangular view Eigen::Diagonal<const Eigen::MatrixXd> d = ldlt.vectorD(); // get H, d CDebug::Print_DenseMatrix_Dimensions(stdout, H, "H: "); CDebug::Print_DenseMatrix_Dimensions(stdout, d, "d: diagonal "); #ifndef __INCREMENTAL_MARGINALS_LDLT_USE_DOUBLE_LDLT Eigen::MatrixXd V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - H * s * H.transpose() * d.asDiagonal(); // calculate V size_t n_zero_num = 0; { CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, d, "d = ", "\n", " %g"); Eigen::VectorXd d_num = d; for(int i = d_num.rows(); i > 0;) { -- i; // here if(fabs(d_num(i)) == 0) ++ n_zero_num; else break; } if(n_zero_num) printf("debug: the last %d entries in d are exactly zero\n", n_zero_num); //CDebug::Print_DenseVector_in_MatlabFormat(stdout, d.data(), d.rows(), " = "); // does not work, has some strange layout // kind of curious; this potentionally cuts down dimensionality of Tu (trailing entries of d seem to be zeroes) } // debug Eigen::MatrixXd Bu = (H.transpose() * (d.asDiagonal() * V.inverse()) * H) * Tu.transpose(); // t_odo - produce the symmetrical product; or double the memory and have e.g. 
right side // of the product mutliply Tu (that way at least the computation is saved, if not storage) /*const int n_zero_num = 3; // bad idea, those are almost zero, but not quite*/ //Tu.block(0, Tu.cols() - n_zero_num, Tu.rows(), n_zero_num).setZero(); //Bu.block(Bu.rows() - n_zero_num, 0, n_zero_num, Bu.cols()).setZero(); // see if we can shave Tu a bit (then we would store only a part of it and calculate only with a part of it) const size_t n_nonzero_num = n_omega_elems - n_zero_num; timer.Accum_DiffSample(f_update_basis_time); CDebug::Print_DenseMatrix_Dimensions(stdout, Tu.topLeftCorner(Tu.rows(), n_nonzero_num), "left _ cols of Tu: "); printf("debug: norm of left %d cols of Tu: %g\n", n_nonzero_num, Tu.topLeftCorner(Tu.rows(), n_nonzero_num).norm()); CDebug::Print_DenseMatrix_Dimensions(stdout, Tu.topRightCorner(Tu.rows(), n_nonzero_num), "right _ cols of Tu: "); printf("debug: norm of right %d cols of Tu: %g\n", n_nonzero_num, Tu.topRightCorner(Tu.rows(), n_nonzero_num).norm()); CDebug::Print_DenseMatrix_Dimensions(stdout, Bu.topLeftCorner(n_nonzero_num, Tu.rows()), "top _ rows of Bu: "); printf("debug: norm of top %d rows of Bu: %g\n", n_nonzero_num, Bu.topLeftCorner(n_nonzero_num, Tu.rows()).norm()); CDebug::Print_DenseMatrix_Dimensions(stdout, Bu.bottomLeftCorner(n_nonzero_num, Tu.rows()), "bottom _ rows of Bu: "); printf("debug: norm of bottom %d rows of Bu: %g\n", n_nonzero_num, Bu.bottomLeftCorner(n_nonzero_num, Tu.rows()).norm()); // all equally nonzero, no use for this formula Eigen::MatrixXd margs_diff2 = Tu * Bu; //Eigen::MatrixXd margs_diff2 = Tu.topLeftCorner(Tu.rows(), n_nonzero_num) * // Bu.topLeftCorner(n_nonzero_num, Bu.cols()); // does not work for some reason // on odometry steps, there are half of the vector zeroes, can save computation // in the update step (a small motivation why do this) #else // !__INCREMENTAL_MARGINALS_LDLT_USE_DOUBLE_LDLT #error "this is conceptually flawed, can't do double LDLT as the second matrix for LDLT is not 
symmetric" Eigen::VectorXd d_abs_sqrt = d.array().abs().sqrt().matrix(); Eigen::VectorXd d_sign(d.rows()); for(int i = 0, n = d.rows(); i < n; ++ i) d_sign(i) = (d(i) < 0)? -1 : 1; // make a sign function and abs(sqrt(.)) function of d Eigen::MatrixXd H_sqrt_d = d_abs_sqrt.asDiagonal() * H; // mind the order (d is in the middle of H^T * d * H) { Eigen::MatrixXd omega_dense_sym = omega_dense; omega_dense_sym.triangularView<Eigen::StrictlyLower>() = omega_dense.triangularView<Eigen::StrictlyUpper>().transpose(); // need both halves for comparison printf("debug: norm of omega - H^T * d * H: %g\n", ((H.transpose() * d.asDiagonal() * H) - omega_dense_sym).norm()); printf("debug: norm of omega - H_sqrt(d)^T * sign(d) * H_sqrt(d): %g\n", ((H_sqrt_d.transpose() * d_sign.asDiagonal() * H_sqrt_d) - omega_dense_sym).norm()); printf("debug: norm of omega - chol.reconstructedMatrix(): %g\n", (ldlt.reconstructedMatrix() - omega_dense_sym).norm()); } // make sure this is the order to multiply them Eigen::MatrixXd V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - H_sqrt_d * s * H_sqrt_d.transpose() * d_sign.asDiagonal(); // d_sign breaks the symmetry // calculate a slightly different V size_t n_nonzero_num = n_omega_elems;// - n_zero_num; Eigen::FullPivLU<Eigen::MatrixXd> luV(V); _ASSERTE(luV.isInvertible()); //Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> chol(d.asDiagonal() * V.inverse()); // fails: d * V is not symmetric and not pos def Eigen::LDLT<Eigen::MatrixXd, Eigen::Upper> chol(luV.inverse()); // works, error 4.113 - quite big //Eigen::LDLT<Eigen::MatrixXd, Eigen::Upper> chol(H.transpose() * (d.asDiagonal() * V.inverse()) * H); _ASSERTE(chol.info() == Eigen::Success); Eigen::MatrixXd Vy = Eigen::MatrixXd(chol.matrixU()) * chol.transpositionsP().transpose(); // need to convert, matrixU() returns triangular view Eigen::Diagonal<const Eigen::MatrixXd> d2 = chol.vectorD(); //Eigen::MatrixXd d2 = //chol.transpositionsP().transpose() * // 
Eigen::MatrixXd(chol.vectorD().asDiagonal()) /** chol.transpositionsP()*/; // has any effect? // can have a smaller dot product { CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, d_sign, "d_sign = "); double V_inv_symmetry = (luV.inverse() - luV.inverse().transpose()).norm(); printf("debug: error of symmetry of inverse V is: %g\n", V_inv_symmetry); Eigen::MatrixXd _V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - H * s * H.transpose() * d.asDiagonal(); Eigen::MatrixXd ref = H.transpose() * d.asDiagonal() * _V.inverse() * H; // this is what we want Eigen::MatrixXd ref2 = H_sqrt_d.transpose() * d_sign.asDiagonal() * luV.inverse() * H_sqrt_d; Eigen::MatrixXd ref3 = H_sqrt_d.transpose() * d_sign.asDiagonal() * chol.reconstructedMatrix() * H_sqrt_d; Eigen::MatrixXd rec = H_sqrt_d.transpose() * Vy.transpose() * d_sign.asDiagonal() * d2.asDiagonal() * Vy * H_sqrt_d; Eigen::MatrixXd rec2 = H_sqrt_d.transpose() * d_sign.asDiagonal() * (Vy.transpose() * d2.asDiagonal() * Vy) * H_sqrt_d; printf("debug: norm of H^T * (d * V^-1) * H - Hsqd^T * d_sign * Vsqd^-1 * Hsqd: %g\n", (ref - ref2).norm()); printf("debug: norm of H^T * (d * V^-1) * H - Hsqd^T * d_sign * chol(Vsqd^-1).reconst() * Hsqd: %g\n", (ref - ref3).norm()); printf("debug: norm of H^T * (d * V^-1) * H - Hsqd^T * (Vy^T * d_sign * d2 * Vy) * Hsqd: %g\n", (ref - rec).norm()); printf("debug: norm of H^T * (d * V^-1) * H - Hsqd^T * d_sign * (Vy^T * d2 * Vy) * Hsqd: %g\n", (ref - rec2).norm()); /*printf("debug: norm of d * V^-1 - Vy^T * d2 * Vy: %g\n", ((Vy.transpose() * d2 * Vy) - dVi).norm()); printf("debug: norm of d * V^-1 - chol.reconstructedMatrix(): %g\n", (chol.reconstructedMatrix() - dVi).norm());*/ } // can we cheat like this? 
/*Eigen::VectorXd d2_num = d2; d2_num.conservativeResize(n_nonzero_num); // consider just the nnz head std::vector<int> perm_vector; perm_vector.reserve(n_nonzero_num); size_t n_positive_head_size = 0, n_negative_tail_size = 0; for(size_t i = 0; i < n_nonzero_num; ++ i) { double f = d2_num(i); if(f > 0) { perm_vector.insert(perm_vector.begin() + n_positive_head_size, 1, i); ++ n_positive_head_size; } else if(f < 0) { perm_vector.push_back(i); ++ n_negative_tail_size; } } perm_vector.resize(n_negative_tail_size + n_positive_head_size);*/ // erase the rest // d2 is sorted by absolute magnitude, positive and negative entries // may be scattered, zeroes should be all at the end though (but somehow that does not always happen) /*Eigen::Map<Eigen::Matrix<int, Eigen::Dynamic, 1>, Eigen::Unaligned> perm_vec(&perm_vector[0], perm_vector.size()); // no copy is made Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> perm_mat(perm_vec); // symmetric permutation matrix Eigen::MatrixXd VH = (Vy.lazyProduct(H)).block(0, 0, n_nonzero_num, n_nonzero_num); Eigen::VectorXd d2_abs(perm_vector.size()); Eigen::MatrixXd VH_perm(perm_vector.size(), perm_vector.size()), Tu_perm(Tu.rows(), perm_vector.size()); for(size_t i = 0; i < n_positive_head_size; ++ i) { d2_abs(i) = d2(perm_vector[i]); Tu_perm.col(i) = Tu.col(perm_vector[i]); VH_perm.col(i) = VH.col(perm_vector[i]).head(perm_vector.size()); } for(size_t i = n_positive_head_size, n = perm_vector.size(); i < n; ++ i) { d2_abs(i) = -d2(perm_vector[i]); Tu_perm.col(i) = Tu.col(perm_vector[i]); VH_perm.col(i) = VH.col(perm_vector[i]).head(perm_vector.size()); } VH = VH_perm; for(size_t i = 0, n = perm_vector.size(); i < n; ++ i) VH_perm.row(i) = VH.row(perm_vector[i]); // make permuted version of d2 and Tu // todo - time these permutations, potentially a lot of data is moved //VH_perm = VH * perm_mat.transpose();// * perm_mat; // perm_mat could actually be smaller than VH. troubling. 
Eigen::MatrixXd Bu_tr = (d2_abs.array().sqrt().matrix().asDiagonal() * VH_perm).lazyProduct(Tu_perm.transpose());*/ // save something on this product already (the lazy stuff and expression templates should distribute the .block) // could save perms by doing d2_num.array().sqrt().matrix().asDiagonal() * VW * perm_matrix * Tu_perm.transpose() or also matrix - have to figure out which one is the fastest Eigen::MatrixXd Bu_tr_perm = (/*d2.array().abs().sqrt().matrix().asDiagonal() * */Vy * H).lazyProduct(Tu.transpose()); /*Eigen::MatrixXd Bu_tr(perm_vector.size(), Bu_tr_perm.cols()); for(size_t i = 0, n = perm_vector.size(); i < n; ++ i) Bu_tr.row(i) = Bu_tr_perm.row(perm_vector[i]);*/ timer.Accum_DiffSample(f_update_basis_time); //CDebug::Print_DenseMatrix_Dimensions(stdout, Bu_tr, "Bu_tr: "); CDebug::Print_DenseMatrix_Dimensions(stdout, Bu_tr_perm, "Bu_tr_perm: "); #ifdef _DEBUG /* CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, d2_num, "d2 = "); printf("perm_vector = {"); for(size_t i = 0, n = perm_vector.size(); i < n; ++ i) printf((i)? 
", %d" : "%d", perm_vector[i]); printf("}\n"); printf("positive head: %d, negative tail: %d\n", n_positive_head_size, n_negative_tail_size);*/ // CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, d2_abs, "d2_abs = "); // CDebug::Print_DenseMatrix_Dimensions(stdout, Tu_perm, "Tu_perm: "); #endif // _DEBUG Eigen::MatrixXd margs_diff2 = Bu_tr_perm.transpose() * d2.asDiagonal() * Bu_tr_perm; /*Eigen::MatrixXd margs_diff2 = Bu_tr.topLeftCorner(n_positive_head_size, Bu_tr.cols()).transpose() * Bu_tr.topLeftCorner(n_positive_head_size, Bu_tr.cols()) - Bu_tr.topRightCorner(n_negative_tail_size, Bu_tr.cols()).transpose() * Bu_tr.topRightCorner(n_negative_tail_size, Bu_tr.cols());*/ // slightly more complicated, but same amount of FLOPs //Eigen::MatrixXd margs_diff2 = Bu_tr.transpose() /** d2_num.asDiagonal()*/ * Bu_tr; // d2 kind of spoils it // this is super imprecise, it only trades one small matrix multiplication for LDLT (probably more expensive due to pivoting) #endif // !__INCREMENTAL_MARGINALS_LDLT_USE_DOUBLE_LDLT timer.Accum_DiffSample(f_update_time); CDebug::Print_DenseMatrix_Dimensions(stdout, V, "V: "); CDebug::Print_DenseMatrix_Dimensions(stdout, (H.transpose() * (d.asDiagonal() * V.inverse()) * H), "H^T * V^-1 * d * H: "); #else // __INCREMENTAL_MARGINALS_USE_LDLT Eigen::MatrixXd V, margs_diff2; #ifdef __INCREMENTAL_MARGINALS_TRY_LLT bool b_have_update; Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> llt(omega_dense); if((b_have_update = (llt.info() == Eigen::Success))) { // non-negative matrix factorization would probably be of more use here printf("debug: was able to use the first LL^T Cholesky\n"); // this happens Eigen::MatrixXd H = llt.matrixU(); V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - H.transpose() * s * H; #if 0 Eigen::LDLT<Eigen::MatrixXd, Eigen::Upper> ldlt(V.inverse()); _ASSERTE(ldlt.info() == Eigen::Success); // sure ... 
Eigen::MatrixXd Vy = Eigen::MatrixXd(ldlt.matrixU()) * ldlt.transpositionsP().transpose(); // need to convert, matrixU() returns triangular view Eigen::Diagonal<const Eigen::MatrixXd> d2 = ldlt.vectorD(); // get Vy, d2 // now Bu_tr = sqrt(abs(d2)) * Vy * H * Tu.transpose() // and margs_diff2 = Bu_tr.transpose() * sign(d2) * Bu_tr // the signs can be reordered in such a way that they form // a difference of two blocked matrix multiplications // unfinished, the first LLT never seems to succeed // it should however work in damped least squares (LM) just fine #else // 0 Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> llt2(V.inverse()); if(llt2.info() == Eigen::Success) { // if not, have to retreat to the other edge printf("debug: was able to use the second LL^T Cholesky\n"); // does this ever happen? Eigen::MatrixXd Bu_tr = (llt2.matrixU() * H) * Tu.transpose(); timer.Accum_DiffSample(f_update_basis_time); margs_diff2 = Bu_tr.transpose() * Bu_tr; // have a symmetric update, using less memory (and therefore less bandwidth) timer.Accum_DiffSample(f_update_time); } else if((llt2 = Eigen::LLT<Eigen::MatrixXd, Eigen::Upper>(H.transpose() * V.inverse() * H)).info() == Eigen::Success) { // or like this with H in it (need to change Bu_tr below accordingly) printf("debug: was able to use the third LL^T Cholesky\n"); // does this ever happen? 
Eigen::MatrixXd Bu_tr = (llt2.matrixU() /** H*/) * Tu.transpose(); // H already inside the factor (passed to chol() alongside V) timer.Accum_DiffSample(f_update_basis_time); margs_diff2 = Bu_tr.transpose() * Bu_tr; // have a symmetric update, using less memory (and therefore less bandwidth) timer.Accum_DiffSample(f_update_time); } else b_have_update = false; #endif // 0 } if(!b_have_update) { printf("debug: was NOT able to use LL^T Cholesky\n"); #else // __INCREMENTAL_MARGINALS_TRY_LLT { #endif // __INCREMENTAL_MARGINALS_TRY_LLT // above: // omega = H^Td * H // V = I - H * s * H^Td // diff = Tu * H^Td * V^-1 * H * Tu^T // here: // omega = omega * I | and I is square identity, idempotent to matrix product // V = I - I * s * omega // diff = Tu * omega * V^-1 * I * Tu^T // or: // omega = I * omega | and I is square identity, idempotent to matrix product // V = I - omega * s * I // diff = Tu * I * V^-1 * omega * Tu^T omega_dense.triangularView<Eigen::StrictlyLower>() = omega_dense.triangularView<Eigen::StrictlyUpper>().transpose(); // need both halves! (block matrix omega doesn't contain its lower triangular part) V = Eigen::MatrixXd::Identity(n_omega_elems, n_omega_elems) - s * omega_dense; // calculate V // todo - do more manual inversion (with LU?) of V, // in case it is not invertible, refrain to use batch marginals Eigen::MatrixXd Bu = ((omega_dense * V.inverse()) * Tu.transpose()); // t_odo - produce the symmetrical product; or double the memory and have e.g. 
right side // of the product mutliply Tu (that way at least the computation is saved, if not storage) timer.Accum_DiffSample(f_update_basis_time); margs_diff2 = Tu * Bu; // the update is a product of a block row vector and a block column vector timer.Accum_DiffSample(f_update_time); } #ifdef _DEBUG CDebug::Print_DenseMatrix_Dimensions(stdout, V, "V: "); CDebug::Print_DenseMatrix_Dimensions(stdout, (V.inverse() * omega_dense), "V^-1 * omega: "); #endif // _DEBUG #endif // __INCREMENTAL_MARGINALS_USE_LDLT #ifdef _DEBUG CDebug::Print_DenseMatrix_Dimensions(stdout, margs_diff2, "margs_diff2: "); #endif // _DEBUG Eigen::MatrixXd margs_downd = margs.topLeftCorner(margs_prev.rows(), margs_prev.cols()) + margs_diff2; /*{ Eigen::MatrixXd omega_full = omega_dense; omega_full.triangularView<Eigen::StrictlyLower>() = omega_dense.triangularView<Eigen::StrictlyUpper>().transpose(); CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, omega_full, "omega_dense = "); } CDebug::Print_DenseMatrix_in_MatlabFormat(stdout, Eigen::VectorXd(omega_dense.diagonal()), "omega_dense.diagonal() = ");*/ fprintf(stderr, "norm of update: %g\n", margs_downd.norm()); fprintf(stderr, "norm of 6 x 6 bottom-right corner of the update: %g\n", margs_downd.bottomRightCorner(6, 6).norm()); /*if(margs_prev.rows() != margs.rows()) { // in case the system grew ... 
fprintf(stderr, "norm of inside update: %g\n", margs_downd.topLeftCorner(margs_prev.rows(), margs_prev.cols()).norm()); fprintf(stderr, "norm of 6 x 6 bottom-right corner of the inside update: %g\n", margs_downd.topLeftCorner(margs_prev.rows(), margs_prev.cols()).bottomRightCorner(6, 6).norm()); }*/ //fprintf(stderr, "norm of 60 x 60 bottom-right corner of the update: %g\n", margs_downd.bottomRightCorner(60, 60).norm()); double f_error = (margs_diff2/*.topLeftCorner(margs_prev.rows(), margs_prev.cols())*/ - (margs_prev - margs.topLeftCorner(margs_prev.rows(), margs_prev.cols()))).norm(); double f_error2 = (margs_prev - margs_downd/*.topLeftCorner(margs_prev.rows(), margs_prev.cols())*/).norm(); // now it works even if dims change fprintf(stderr, "error of differences: %g\n", f_error); fprintf(stderr, "error of marginals: %g\n", f_error2); double f_margsupd_total_time = 0; timer.Accum_CumTime_LastSample(f_margsupd_total_time); printf("marginals update took %.5f msec\n", f_margsupd_total_time * 1000); printf("\tomega: %.5f msec\n", f_omega_time * 1000); printf("\tTu, s: %.5f msec\n", f_dense_margs_time * 1000); printf("\tbasis: %.5f msec\n", f_update_basis_time * 1000); printf("\t upd: %.5f msec\n", f_update_time * 1000); } #if 0 if(lambda.b_EqualLayout(lambda_prev)) { lambda_prev.Rasterize("incmargs_00_lambda-prev.tga"); lambda.Rasterize("incmargs_01_lambda.tga"); CUberBlockMatrix omega_b; omega_b = lambda; lambda_prev.AddTo(omega_b, -1); // omega_b = lambda - lambda_prev omega_b.Rasterize("incmargs_02_lambda-diff.tga"); //size_t n_omega_block_cols = omega.n_BlockColumn_Num(); // can't count that, does not have all the columns with good dims (very sparse) size_t n_lambda_block_cols = lambda.n_BlockColumn_Num(); size_t n_first_block_col = n_order_min;//n_lambda_block_cols - n_omega_block_cols; CUberBlockMatrix omega_slice; omega_b.SliceTo(omega_slice, n_first_block_col, n_lambda_block_cols, n_first_block_col, n_lambda_block_cols, true); 
omega_slice.Rasterize("incmargs_03_lambda-diff-slice.tga"); omega.AddTo(omega_slice, -1); omega.Rasterize("incmargs_04_omega.tga"); omega_slice.Rasterize("incmargs_05_omega-diff.tga"); double f_norm = omega_slice.f_Norm(); printf("norm of omegas is %g\n", f_norm); CDebug::Print_DenseMatrix_Dimensions(stdout, margs_prev, "margs_prev: "); CDebug::Print_DenseMatrix_Dimensions(stdout, margs, "margs: "); Eigen::MatrixXd margs_diff = margs - margs_prev; // calculate the difference Eigen::MatrixXd Hu; Eigen::Matrix3d Hui, Hun; //omega.Convert_to_Dense(Hu); { _ASSERTE(n_edge_num - n_prev_edge_num == 1); // now we manually handle only a single edge const typename CSystemType::_TyBaseEdge &last_edge = system.r_Edge_Pool()[system.r_Edge_Pool().n_Size() - 1]; Eigen::Vector3d exp, err; last_edge.Calculate_Jacobians_Expectation_Error(Hui, Hun, exp, err); // todo - need to derive the type appropriately, probably could be a function of the pool, call back with true type (it already is for_each()? do for_one()?) // interesting problem - have an indexable vfptr, generated by a template, per type // have N types, N vfptrs, stored in an array, each contains templated function (can't!) 
// would need to store type id (not so hard, just not nice) /*Hui = -Hui; Hun = -Hun;*/ Hu.resize(3, 6); Hu.block(0, 0, 3, 3) = Hui; Hu.block(0, 3, 3, 3) = Hun; // Hu = [Hui Hun]; } // get Hu CDebug::Print_DenseMatrix_Dimensions(stdout, Hui, "Hui: "); CDebug::Print_DenseMatrix_Dimensions(stdout, Hun, "Hun: "); CDebug::Print_DenseMatrix_Dimensions(stdout, Hu, "Hu: "); Eigen::MatrixXd Tu = margs/*_prev*/.block(0, n_elem_order_min, margs/*_prev*/.rows(), margs/*_prev*/.cols() - n_elem_order_min); // column block (all rows in it) Eigen::MatrixXd s = margs/*_prev*/.block(n_elem_order_min, n_elem_order_min, margs/*_prev*/.rows() - n_elem_order_min, margs/*_prev*/.cols() - n_elem_order_min); // smaller square block // get Tu, s CDebug::Print_DenseMatrix_Dimensions(stdout, Tu, "Tu: "); CDebug::Print_DenseMatrix_Dimensions(stdout, s, "s: "); _ASSERTE(Hu.rows() == 3 && Hu.cols() == 6); // just two verts, tightly packed // meh; need to compress Hu and Tu (and s) to avoid empty space. probably easy with a single edge, hard with e.g. each 10 Eigen::MatrixXd Su = information.inverse() - Hu * s * Hu.transpose(); // t_odo - need to get information for every edge? how to do multiple edges? one at a time? // t_odo - omega already has information in it! // on more edges, need to accumulate Bu (below), which has rows of lambda and cols of ?measurement? dimension. // adding measurements of different dims would require resizing Bu and adding them over each other, should // still be correct. 
Eigen::LLT<Eigen::MatrixXd, Eigen::Upper> chol(Su.inverse()); _ASSERTE(chol.info() == Eigen::Success); Eigen::MatrixXd Vy = chol.matrixU(); //Eigen::MatrixXd Bu = Tu * (Hu.transpose() * Vy.transpose()); // Hu and Vy are small (and fixed-size), this is faster //Eigen::MatrixXd Bu_tr = (Tu * (Hu.transpose() * Vy.transpose())).transpose(); //Eigen::MatrixXd Bu_tr = Eigen::MatrixXd((Hu.transpose() * Vy.transpose()).transpose()) * Tu.transpose(); // too many transposes for Eigen, seems to work with a temporary, but should equal the above / below formula Eigen::MatrixXd Bu_tr = (Vy * Hu) * Tu.transpose(); //Eigen::MatrixXd margs_diff2 = -Bu * Bu.transpose(); Eigen::MatrixXd margs_diff2 = Bu_tr.transpose() * Bu_tr; // each component of the diff is a short dot product / small matrix multiplication // or, just get Hu' * Vy' and get Tu outside, as there needs to be some "smart" mgmt of what // to calculate (need *dense* column ranges of the marginals) CDebug::Print_DenseMatrix_Dimensions(stdout, Su, "Su: "); CDebug::Print_DenseMatrix_Dimensions(stdout, Vy, "Vy: "); //CDebug::Print_DenseMatrix_Dimensions(stdout, Bu, "Bu: "); CDebug::Print_DenseMatrix_Dimensions(stdout, Bu_tr, "Bu': "); CDebug::Print_DenseMatrix_Dimensions(stdout, margs_diff2, "margs_diff2: "); /*{ FILE *p_fw = fopen("margs_diff.m", "w"); CDebug::Print_DenseMatrix_in_MatlabFormat(p_fw, margs_diff2, "margsDiff = "); fclose(p_fw); }*/ // bluh Eigen::MatrixXd margs_upd = margs/*_prev*/ + margs_diff2; double f_error = (-margs_diff2 - margs_diff).norm(); double f_error2 = (margs_prev - margs_upd).norm(); fprintf(stderr, "norm of update: %g\n", margs_upd.norm()); fprintf(stderr, "norm of 6 x 6 bottom-right corner of the update: %g\n", margs_upd.bottomRightCorner(6, 6).norm()); //fprintf(stderr, "norm of 60 x 60 bottom-right corner of the update: %g\n", margs_upd.bottomRightCorner(60, 60).norm()); fprintf(stderr, "error of differences: %g\n", f_error); fprintf(stderr, "error of marginals: %g\n", f_error2); } // just 
checks omega, in case the dimensions did not change (and calculates up / downdate to the marginals btw) #endif // 0 } }; /** * @brief marginal covariances cache object */ class CMarginalCovariance { protected: mutable Eigen::MatrixXd m_matrix; /**< @brief marginal covariance matrix */ CUberBlockMatrix m_sparse; /**< @brief sparse marginal covariance matrix */ mutable bool m_b_dense_up_to_date; /**< @brief dense marginals matrix dirty flag */ size_t m_n_edges_in_last_marginals; /**< @brief number of edges used for calculation of the last marginals (only stored here; set and read from outside) */ bool m_b_can_update_marginals; /**< @brief incremental update availability flag (only stored here; set and read from outside) */ public: /** * @brief default constructor; initializes an empty marginals cache */ CMarginalCovariance() :m_b_dense_up_to_date(true), // initially both empty, that is ok m_n_edges_in_last_marginals(0), m_b_can_update_marginals(false) // initially can't update, that would amount to running full dense marginals {} /** * @brief gets the number of edges used for calculation of the marginals * @return Returns the number of edges used for calculation of the current marginals. * @note The consistency of this counter is maintained by the caller (which is the nonlinear solver). */ size_t n_Edge_Num() const { return m_n_edges_in_last_marginals; } /** * @brief updates the number of edges since the last marginals recalculation / update * @param[in] n_edge_num is number of edges used for calculation of the marginals */ void Set_Edge_Num(size_t n_edge_num) { m_n_edges_in_last_marginals = n_edge_num; } /** * @brief reads the update availability flag * @return Returns true if the marginals can be incrementally updated, false if they * need to be recalculated (that is typically after a linearization point change). * @note The consistency of this flag is maintained by the caller (which is the nonlinear solver). 
*/ bool b_CanUpdate() const { return m_b_can_update_marginals; } /** * @brief raises the update availability flag * @note Call this after calculating marginals using batch. */ void EnableUpdate() { m_b_can_update_marginals = true; } /** * @brief clears the update availability flag * @note Call this on linearization point change. */ void DisableUpdate() { m_b_can_update_marginals = false; } #if 0 /** * @brief gets marginal covariance matrix * * @return Returns reference to the marginal covariance matrix. * * @note Depending on the policy used, this matrix might not contain all of the * marginal covariance values, or they might not be up to date. * See e.g. Request_Block() for more details. * @note The space complexity required to store the full matrix can be significant, * more efficient methods for passing only a part of the matrix will be provided. */ inline Eigen::MatrixXd &r_Matrix() { return m_matrix; } #endif // 0 /** * @brief sets marginal covariance matrix * @param[in] r_sparse is the new marginal covariance matrix * @note This function throws std::bad_alloc. */ inline void Set_SparseMatrix(const CUberBlockMatrix &r_sparse) // throw(std::bad_alloc) { m_b_dense_up_to_date = false; m_sparse = r_sparse; } /** * @brief sets marginal covariance matrix * @param[in,out] r_sparse is the new marginal covariance matrix (destroyed in the process) */ inline void Swap_SparseMatrix(CUberBlockMatrix &r_sparse) { m_b_dense_up_to_date = false; m_sparse.Swap(r_sparse); } /** * @brief gets marginal covariance matrix * * @return Returns reference to the marginal covariance matrix. * * @note Depending on the policy used, this matrix might not contain all of the * marginal covariance values, or they might not be up to date. * See e.g. Request_Block() for more details. */ inline const CUberBlockMatrix &r_SparseMatrix() const { return m_sparse; } /** * @brief gets marginal covariance matrix * * @return Returns const reference to the marginal covariance matrix. 
* * @note This function throws std::bad_alloc. * @note Depending on the policy used, this matrix might not contain all of the * marginal covariance values, or they might not be up to date. * See e.g. Request_Block() for more details. * @note The space complexity required to store the full matrix can be significant, * more efficient methods for passing only a part of the matrix will be provided. */ inline const Eigen::MatrixXd &r_Matrix() const // throw(std::bad_alloc) { if(!m_b_dense_up_to_date) { m_sparse.Convert_to_Dense(m_matrix); m_b_dense_up_to_date = true; } return m_matrix; } /** * @brief determines whether a specific block in the marginals matrix is up-to-date * * @param[in] n_block_row is zero-based block row index (index of the first vertex) * @param[in] n_block_column is zero-based block column index (index of the second vertex) * * @return Returns true if the queried area is up-to-date, otherwise returns false. * * @note This function just determines if the queried area is up-to-date but does * not update it. Thus, it is very fast and can be used for e.g. covariance polling. */ inline bool b_Block_UpToDate(size_t UNUSED(n_block_row), size_t UNUSED(n_block_column)) const { return true; } /** * @brief determines whether a specific block column in the marginals matrix is up-to-date * @param[in] n_block_column is zero-based block row index (index of the first vertex) * @return Returns true if the queried area is up-to-date, otherwise returns false. * @note This function just determines if the queried area is up-to-date but does * not update it. Thus, it is very fast and can be used for e.g. covariance polling. */ inline bool b_Column_UpToDate(size_t UNUSED(n_block_column)) const { return true; } /** * @brief determines whether the block diagonal in the marginals matrix is up-to-date * @return Returns true if the queried area is up-to-date, otherwise returns false. 
* @note This function just determines if the queried area is up-to-date but does * not update it. Thus, it is very fast and can be used for e.g. covariance polling. */ inline bool b_Diagonal_UpToDate() const { return true; } /** * @brief determines whether all of the marginals matrix is up-to-date * @return Returns true if the queried area is up-to-date, otherwise returns false. * @note This function just determines if the queried area is up-to-date but does * not update it. Thus, it is very fast and can be used for e.g. covariance polling. */ inline bool b_FullMatrix_UpToDate() const { return true; } /** * @brief makes sure that a block, corresponding to covariance of two vertices * is calculated (will perform the calculation if it is not) * * @param[in] n_block_row is zero-based block row index (index of the first vertex) * @param[in] n_block_column is zero-based block column index (index of the second vertex) * * @note Depending on the cache miss policy, this will possibly also calculate * other parts of the matrix (e.g. the whole block column or the full matrix). * @note This function throws std::bad_alloc. */ void Request_Block(size_t UNUSED(n_block_row), size_t UNUSED(n_block_column)) // throw(std::bad_alloc) { // at the moment, all of the matrix is available, requests have no effect. // this will change with more efficient algorithms in the future ... } /** * @brief makes sure that a block column, corresponding to covariance of one vertex * with all the vertices (including itself) is calculated (will perform the * calculation if it is not) * * @param[in] n_block_column is zero-based block column index * * @note Depending on the cache miss policy, this will possibly also calculate * other parts of the matrix (e.g. the full matrix). * @note This function throws std::bad_alloc. */ void Request_BlockColumn(size_t UNUSED(n_block_column)) // throw(std::bad_alloc) { // at the moment, all of the matrix is available, requests have no effect. 
// this will change with more efficient algorithms in the future ... } /** * @brief makes sure that block diagonal portion of the covariance matrix is calculated * (will perform the calculation if it is not) * * The block diagonal means blocks at the diagonal (corresponds to auto-covariances of each * vertex with itself). * * @note Depending on the cache miss policy, this will possibly also calculate * other parts of the matrix (e.g. the full matrix). * @note This function throws std::bad_alloc. */ void Request_BlockDiagonal() // throw(std::bad_alloc) { // at the moment, all of the matrix is available, requests have no effect. // this will change with more efficient algorithms in the future ... } /** * @brief makes sure that everything in the marginal covariance matrix is up-to-date * @note This function throws std::bad_alloc. */ void Request_Full() // throw(std::bad_alloc) { // at the moment, all of the matrix is available, requests have no effect. // this will change with more efficient algorithms in the future ... } /** * @brief diagonals of the diagonal blocks of the marginals to a file * @param[in] p_s_filename is a null-termibnated string with output filename (default "marginals.txt") * @return Returns true on success, false on failure. * @note This does not do any checking whether the matrix is up to date. 
*/ bool Dump_Diagonal(const char *p_s_filename = "marginals.txt") const { const CUberBlockMatrix &r_marginals = r_SparseMatrix(); FILE *p_fw; if((p_fw = fopen(p_s_filename, "w"))) { for(size_t i = 0, n = r_marginals.n_BlockColumn_Num(); i < n; ++ i) { size_t n_order = r_marginals.n_BlockColumn_Base(i); size_t n_dimension = r_marginals.n_BlockColumn_Column_Num(i); // get col CUberBlockMatrix::_TyConstMatrixXdRef block = r_marginals.t_FindBlock(n_order, n_order); // get block //fprintf(p_fw, "block_%d_%d = ", int(i), int(i)); //CDebug::Print_DenseMatrix_in_MatlabFormat(p_fw, block); // prints the matrix _ASSERTE(block.rows() == block.cols() && block.cols() == n_dimension); for(size_t i = 0; i < n_dimension; ++ i) { double f = block(i, i); fprintf(p_fw, (fabs(f) > 1)? ((i)? " %.15f" : "%.15f") : ((i)? " %.15g" : "%.15g"), f); } fprintf(p_fw, "\n"); // print just the diagonal, one line per every vertex } if(ferror(p_fw)) { fclose(p_fw); return false; } return !fclose(p_fw); } // dump diagonal blocks of the marginals to a file return false; } }; /** @} */ // end of group #endif // !__MARGINALS_INCLUDED
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
kernels.h
/*
 * Do most of a dot product computation using two input vectors,
 * output an array that simply needs to be summed at the end.
 *
 * result must hold one int per team: each team writes its partial dot
 * product to result[team index]; the caller sums the `teams` entries.
 * NOTE(review): threadsPerBlock is a compile-time constant defined
 * elsewhere; the tree reduction below assumes it is a power of two,
 * otherwise elements at odd split points are dropped -- TODO confirm.
 */
void computeDotProductHelper(
    int *__restrict result,
    const int *__restrict v1,
    const int *__restrict v2,
    const int teams,
    const int vLength)
{
  #pragma omp target teams num_teams(teams) thread_limit(threadsPerBlock)
  {
    /* One partial-sum slot per thread, shared by the whole team. */
    int cache[threadsPerBlock];
    #pragma omp parallel
    {
      const int cacheIndex = omp_get_thread_num();
      const int blockIdx_x = omp_get_team_num();
      /* Starting element for this thread; the while loop below strides by
       * the total thread count so the whole vector gets covered. */
      int tid = cacheIndex + blockIdx_x * threadsPerBlock;
      int temp = 0;
      while (tid < vLength) {
        temp += v1[tid] * v2[tid];
        tid += threadsPerBlock * teams;
      }
      cache[cacheIndex] = temp;
      /* Every thread must have stored its partial sum before any thread
       * starts the reduction. */
      #pragma omp barrier
      /* Pairwise tree reduction over the team-shared cache array. */
      int i = threadsPerBlock/2;
      while(i != 0) {
        if (cacheIndex < i)
          cache[cacheIndex] += cache[cacheIndex + i];
        /* The barrier is outside the if so all threads reach it. */
        #pragma omp barrier
        i /= 2;
      }
      /* Thread 0 publishes the team's partial result. */
      if (cacheIndex == 0)
        result[blockIdx_x] = cache[0];
    }
  }
}

/*
 * Count how many elements in a vector exceed some threshold, outputting an array that simply needs to be summed at the end to get the final result.
 *
 * counter must hold one int per team; the caller sums the `teams` entries.
 * Structure mirrors computeDotProductHelper above (grid of teams, strided
 * element loop, then a barrier-synchronized tree reduction).
 */
void countAboveThresholdHelper(
    const float *__restrict array,
    const float threshold,
    int *__restrict counter,
    const int teams,
    const int arrayLength)
{
  #pragma omp target teams num_teams(teams) thread_limit(threadsPerBlock)
  {
    /* Per-thread 0/1-ish counts, shared by the whole team. */
    int binomial[threadsPerBlock];
    #pragma omp parallel
    {
      const int cacheIndex = omp_get_thread_num();
      const int blockIdx_x = omp_get_team_num();
      int tid = cacheIndex + blockIdx_x * threadsPerBlock;
      /* Number of elements this thread saw above the threshold. */
      int ptemp = 0;
      while (tid < arrayLength) {
        if (array[tid] > threshold) {
          ptemp++;
        }
        tid += threadsPerBlock * teams;
      }
      binomial[cacheIndex] = ptemp;
      /* All counts stored before the reduction reads them. */
      #pragma omp barrier
      /* Pairwise tree reduction; same power-of-two caveat as above. */
      int i = threadsPerBlock/2;
      while(i != 0) {
        if (cacheIndex < i)
          binomial[cacheIndex] += binomial[cacheIndex + i];
        #pragma omp barrier
        i /= 2;
      }
      /* Thread 0 publishes the team's count. */
      if (cacheIndex == 0)
        counter[blockIdx_x] = binomial[0];
    }
  }
}

/*
 * Compute connection scores for randomized signatures, normalized to a given UCmax.
 * random should be a pointer to a (device) array containing uniformly distributed random values between 0-1 (including 0, but not including 1; these can be generated by CURAND).
 * A value > 0.5 indicates the randomly-selected gene that should be up-regulated, otherwise the gene should be down-regulated.
 * The same random number is then also used to select the gene, by rescaling its absolute value after subtracting 0.5 into an array index (effectively).
 *
 * output must hold nRandomGenerations floats, one score per generation.
 * NOTE(review): after the down-regulation branch n < 0.5, so rangeInArray
 * < U133AArrayLength; this presumes reffile has at least U133AArrayLength
 * entries -- TODO confirm against the caller.
 */
void computeRandomConnectionScores(
    const float *__restrict random,
    const int *__restrict reffile,
    float *__restrict output,
    const int M,
    const float UCmax,
    const int setSize,
    const int nRandomGenerations)
{
  #pragma omp target teams distribute parallel for thread_limit(threadsPerBlock)
  for (int idx = 0; idx < nRandomGenerations; idx++) {
    float temp = 0.0;
    /* Each generation idx consumes every nRandomGenerations-th random value. */
    for(int col = idx; col < M; col += nRandomGenerations) {
      // For converting to in-range indices, it helps to have the random numbers from 0 (inclusive) to 1 (non-inclusive) - so just flip them
      float n = 1.0f - random[col];
      // We'll ultimately want to normalize our results by the setSize - do it now, when round-off errors will hopefully cost less
      float regulateFactor = 1.0f / setSize;
      // If our random number now is >= 0.5, we'll downregulate - and subtract 0.5 so that 0 <= random < 0.5
      if (n >= 0.5f) {
        regulateFactor = -regulateFactor;
        n -= 0.5f;
      }
      // Scale up random to become an integer index < arraySizeEnd
      // (int) is not equal to __float2int_rd
      int rangeInArray = (int)(n * U133AArrayLength * 2);
      // Add or subtract the randomly selected value from the array to the cumulative total
      temp += reffile[rangeInArray] * regulateFactor;
    }
    // Update the output, further normalizing by UCmax
    output[idx] = temp / UCmax;
  }
}
thdat06.c
/* * Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the * following conditions are met: * * 1. Redistributions of source code must retain this list * of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce this * list of conditions and the following disclaimer in the * documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
*/ #include <config.h> #include <stdlib.h> #include <string.h> #include <thtk/thtk.h> #include "bits.h" #include "thdat.h" #include "thlzss.h" #include "dattypes.h" static uint32_t th06_read_uint32( struct bitstream* b) { uint32_t size = bitstream_read(b, 2); return bitstream_read(b, (size + 1) * 8); } static void th06_write_uint32( struct bitstream* b, uint32_t value) { unsigned int size = 1; if (value & 0xffffff00) { size = 2; if (value & 0xffff0000) { size = 3; if (value & 0xff000000) size = 4; } } bitstream_write(b, 2, size - 1); bitstream_write(b, size * 8, value); } static void th06_read_string( struct bitstream* b, unsigned int length, char* data) { while (length) { *data = bitstream_read(b, 8); if (!*data) break; data++; length--; } } static void th06_write_string( struct bitstream* b, unsigned int length, char* data) { unsigned int i; for (i = 0; i < length; ++i) bitstream_write(b, 8, data[i]); } static int th06_open( thdat_t* thdat, thtk_error_t** error) { char magic[4]; if (thtk_io_read(thdat->stream, magic, 4, error) != 4) return 0; if (strncmp(magic, "PBG3", 4) == 0) { struct bitstream b; bitstream_init(&b, thdat->stream); uint32_t entry_count = th06_read_uint32(&b); thdat->offset = th06_read_uint32(&b); if (thtk_io_seek(thdat->stream, thdat->offset, SEEK_SET, error) == -1) return 0; bitstream_init(&b, thdat->stream); for (unsigned int i = 0; i < entry_count; ++i) { thdat_entry_t* entry; ARRAY_GROW(thdat->entry_count, thdat->entries, entry); thdat_entry_init(entry); th06_read_uint32(&b); th06_read_uint32(&b); entry->extra = th06_read_uint32(&b); entry->offset = th06_read_uint32(&b); entry->size = th06_read_uint32(&b); th06_read_string(&b, 255, entry->name); } } else if (strncmp(magic, "PBG4", 4) == 0) { th07_header_t header; if (thtk_io_read(thdat->stream, &header, sizeof(header), error) != sizeof(header)) return 0; off_t end = thtk_io_seek(thdat->stream, 0, SEEK_END, error); if (end == -1) return 0; if (thtk_io_seek(thdat->stream, header.offset, 
SEEK_SET, error) == -1) return 0; thtk_io_t* entry_headers = thtk_io_open_growing_memory(error); /* XXX: I can't use header.size for this. */ if (th_unlzss(thdat->stream, entry_headers, header.size, error) == -1) return 0; const uint32_t* ptr = (uint32_t*)thtk_io_map(entry_headers, 0, header.size, error); if (!ptr) return 0; for (unsigned int i = 0; i < header.count; ++i) { thdat_entry_t* entry = NULL; ARRAY_GROW(thdat->entry_count, thdat->entries, entry); thdat_entry_init(entry); strcpy(entry->name, (char*)ptr); ptr = (uint32_t*)((char*)ptr + strlen(entry->name) + 1); entry->offset = *ptr++; entry->size = *ptr++; entry->extra = *ptr++; } thtk_io_unmap(entry_headers, (unsigned char*)ptr); thtk_io_close(entry_headers); } else { thtk_error_new(error, "magic string not recognized"); return 0; } off_t end_offset = thtk_io_seek(thdat->stream, 0, SEEK_END, error); if (end_offset == -1) return 0; if (thdat->entry_count) { thdat_entry_t* prev = NULL; for (unsigned int i = 0; i < thdat->entry_count; ++i) { thdat_entry_t* entry = &thdat->entries[i]; if (prev) prev->zsize = entry->offset - prev->offset; prev = entry; } prev->zsize = end_offset - prev->offset; } return 1; } static ssize_t th06_read( thdat_t* thdat, int entry_index, thtk_io_t* output, thtk_error_t** error) { thdat_entry_t* entry = &thdat->entries[entry_index]; unsigned char* zdata = malloc(entry->zsize); int failed; #pragma omp critical { failed = (thtk_io_seek(thdat->stream, entry->offset, SEEK_SET, error) == -1) || (thtk_io_read(thdat->stream, zdata, entry->zsize, error) != entry->zsize); } if (failed) return -1; thtk_io_t* zdata_stream = thtk_io_open_memory(zdata, entry->zsize, error); if (!zdata_stream) return -1; int ret = th_unlzss(zdata_stream, output, entry->size, error); thtk_io_close(zdata_stream); return ret; } static int th06_create( thdat_t* thdat, thtk_error_t** error) { /* 13 is the largest size the header can have, so some bytes might be * wasted. */ thdat->offset = thdat->version == 6 ? 
13 : 16; if (thtk_io_seek(thdat->stream, thdat->offset, SEEK_SET, error) == -1) return 0; return 1; } static ssize_t th06_write( thdat_t* thdat, int entry_index, thtk_io_t* input, size_t input_length, thtk_error_t** error) { thdat_entry_t* entry = &thdat->entries[entry_index]; entry->size = input_length; thtk_io_t* zdata_stream = thtk_io_open_growing_memory(error); if (!zdata_stream) return -1; /* There is a chance that one of the games support uncompressed data. */ if ((entry->zsize = th_lzss(input, entry->size, zdata_stream, error)) == -1) return -1; unsigned char* zdata = thtk_io_map(zdata_stream, 0, entry->zsize, error); if (!zdata) return -1; if (thdat->version == 6) { entry->extra = 0; for (ssize_t i = 0; i < entry->zsize; ++i) entry->extra += zdata[i]; } int ret; #pragma omp critical { ret = thtk_io_write(thdat->stream, zdata, entry->zsize, error); entry->offset = thdat->offset; thdat->offset += entry->zsize; } thtk_io_unmap(zdata_stream, zdata); thtk_io_close(zdata_stream); if (ret != entry->zsize) return -1; return ret; } static int th06_close( thdat_t* thdat, thtk_error_t** error) { const char* magic = thdat->version == 6 ? "PBG3" : "PBG4"; unsigned int i; uint32_t header[3]; const uint32_t zero = 0; struct bitstream b; ssize_t buffer_size; thtk_io_t* buffer = NULL; if (thdat->version == 6) { bitstream_init(&b, thdat->stream); } else { buffer = thtk_io_open_growing_memory(error); } for (i = 0; i < thdat->entry_count; ++i) { thdat_entry_t* entry = &thdat->entries[i]; if (thdat->version == 6) { /* These values are unknown, but it seems they can be ignored. */ uint32_t unknown1 = 0; /* The same for all entries in an archive. */ uint32_t unknown2 = 0; /* Starts at a high value. * Increases by a random multiple of a thousand * per entry. 
*/ th06_write_uint32(&b, unknown1); th06_write_uint32(&b, unknown2); th06_write_uint32(&b, entry->extra); th06_write_uint32(&b, entry->offset); th06_write_uint32(&b, entry->size); th06_write_string(&b, strlen(entry->name) + 1, entry->name); } else { if (thtk_io_write(buffer, entry->name, strlen(entry->name) + 1, error) == -1) return 0; if (thtk_io_write(buffer, &entry->offset, sizeof(uint32_t), error) != sizeof(uint32_t)) return 0; if (thtk_io_write(buffer, &entry->size, sizeof(uint32_t), error) != sizeof(uint32_t)) return 0; if (thtk_io_write(buffer, &zero, sizeof(uint32_t), error) != sizeof(uint32_t)) return 0; } } if (thdat->version == 6) { bitstream_finish(&b); } else { if (thtk_io_write(buffer, &zero, sizeof(uint32_t), error) != sizeof(uint32_t)) return 0; if ((buffer_size = thtk_io_seek(buffer, 0, SEEK_CUR, error)) == -1) return 0; if (thtk_io_seek(buffer, 0, SEEK_SET, error) == -1) return 0; if (th_lzss(buffer, buffer_size, thdat->stream, error) == -1) return 0; thtk_io_close(buffer); } if (thtk_io_seek(thdat->stream, 0, SEEK_SET, error) == -1) return 0; if (thtk_io_write(thdat->stream, magic, 4, error) == -1) return 0; if (thdat->version == 6) { bitstream_init(&b, thdat->stream); th06_write_uint32(&b, thdat->entry_count); th06_write_uint32(&b, thdat->offset); bitstream_finish(&b); } else { header[0] = thdat->entry_count; header[1] = thdat->offset; header[2] = buffer_size; if (thtk_io_write(thdat->stream, header, sizeof(header), error) == -1) return 0; } return 1; } const thdat_module_t archive_th06 = { THDAT_BASENAME, th06_open, th06_create, th06_close, th06_read, th06_write };
GB_unaryop__lnot_fp32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this is generated code -- keep changes to comments only and
// make substantive edits in the generator instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_fp32_uint64
// op(A') function:  GB_tran__lnot_fp32_uint64

// C type:   float
// A type:   uint64_t
// cast:     float cij = (float) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
// Reads aij into a local, casts it to the C type, then applies the
// operator; the local copy is what makes in-place Cx==Ax aliasing safe.
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    GB_GETA (aij, Ax, pA) ;                \
    /* Cx [pC] = op (cast (aij)) */        \
    GB_CASTING (z, aij) ;                  \
    GB_OP (GB_CX (pC), z) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) elementwise over anz entries, in parallel.
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop__lnot_fp32_uint64
(
    float *Cx,              // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Each iteration touches only index p, so a static schedule is race-free.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose template (GB_unaryop_transpose.c) is specialized by
// the type/operator macros defined above.
GrB_Info GB_tran__lnot_fp32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
burgers1d_perf_b.c
#ifndef TAPENADE
#include <math.h>
#endif

#define Max(x,y) fmax(x,y)
#define Min(x,y) fmin(x,y)
#define Heaviside(x) ((x>=0)?1.0:0.0)

/* Array-access macros so the generated code can use u(i) notation. */
#define u(x) u[x]
#define u_b(x) u_b[x]
#define u_1(x) u_1[x]
#define u_1_b(x) u_1_b[x]

/*
 * burgers1d_perf_b: reverse-mode (adjoint) sweep for a 1-D Burgers-type
 * upwind update.  Given the adjoint u_b of the output state u, accumulate
 * into u_1_b the adjoint of the previous state u_1.
 *
 * NOTE(review): the TAPENADE guard above suggests this code was produced by
 * the Tapenade AD tool from a forward routine not visible here — confirm
 * index handling against that primal before modifying.
 *
 *   u      : primal output state (unused here except through macros)
 *   u_b    : adjoint of u (input, read-only here)
 *   u_1    : primal previous state (read-only)
 *   u_1_b  : adjoint of u_1 (accumulated in place)
 *   D      : diffusion coefficient
 *   C      : convection coefficient (CFL-like factor)
 *   n      : number of grid points; presumably u(0) and u(n-1) are boundary
 *            values, which is why those rows get only partial updates —
 *            TODO confirm against the forward sweep.
 */
void burgers1d_perf_b(double* u, double* u_b, double* u_1, double* u_1_b, double D, double C, int n)
{
  int i;

  /* Boundary-adjacent rows are handled explicitly, outside the parallel
     loop, because they lack one or more neighbor contributions. */
  i=0;
  u_1_b(i) += (C*Max(0, u_1(i + 1)) + D)*u_b(i + 1);
  i=n - 2;
  u_1_b(i) += (-C*((-u_1(i) + u_1(i + 1))*Heaviside(-u_1(i)) + (u_1(i) - u_1(i - 1))*Heaviside(u_1(i)) + Max(0, u_1(i)) - Min(0, u_1(i))) - 2.0*D + 1)*u_b(i);
  u_1_b(i) += (-C*Min(0, u_1(i - 1)) + D)*u_b(i - 1);
  i=1;
  u_1_b(i) += (C*Max(0, u_1(i + 1)) + D)*u_b(i + 1);
  u_1_b(i) += (-C*((-u_1(i) + u_1(i + 1))*Heaviside(-u_1(i)) + (u_1(i) - u_1(i - 1))*Heaviside(u_1(i)) + Max(0, u_1(i)) - Min(0, u_1(i))) - 2.0*D + 1)*u_b(i);
  i=n - 1;
  u_1_b(i) += (-C*Min(0, u_1(i - 1)) + D)*u_b(i - 1);

  /* Interior rows: the adjoint is written in "gather" form — iteration i
     writes only u_1_b(i) (reading u_b(i-1), u_b(i), u_b(i+1)), so there is
     no write conflict between iterations and the loop parallelizes safely. */
  #pragma omp parallel for private(i)
  for ( i=2; i<=n - 3; i++ )
  {
    /* Contribution of u_b(i+1) through the upwind flux term. */
    u_1_b(i) += (C*Max(0, u_1(i + 1)) + D)*u_b(i + 1);
    /* Diagonal contribution: derivative of the i-th update w.r.t. u_1(i). */
    u_1_b(i) += (-C*((-u_1(i) + u_1(i + 1))*Heaviside(-u_1(i)) + (u_1(i) - u_1(i - 1))*Heaviside(u_1(i)) + Max(0, u_1(i)) - Min(0, u_1(i))) - 2.0*D + 1)*u_b(i);
    /* Contribution of u_b(i-1) through the downwind flux term. */
    u_1_b(i) += (-C*Min(0, u_1(i - 1)) + D)*u_b(i - 1);
  }
}
simd_loop.c
/*
 * simd_loop: element-wise vector addition.
 *
 * Computes a[i] = b[i] + c[i] for every index in [0, n).  The iterations
 * are independent, so the OpenMP `simd` construct asks the compiler to
 * vectorize the loop.
 *
 *   a : output vector (length >= n)
 *   b : first input vector (length >= n)
 *   c : second input vector (length >= n)
 *   n : number of elements to process; n <= 0 performs no work
 */
void simd_loop(double *a, double *b, double *c, int n)
{
    int idx;

    /* Request SIMD vectorization of the independent iterations. */
    #pragma omp simd
    for (idx = 0; idx < n; idx++)
    {
        a[idx] = b[idx] + c[idx];
    }
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of pixels % represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents. This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. 
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/compare.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/

/* A pixel with double-precision channels; used for exact color sums. */
typedef struct _DoublePixelPacket
{
  double
    red,
    green,
    blue,
    alpha;
} DoublePixelPacket;

/* One node of the color-description octree (16-way when alpha is tracked). */
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];          /* 8 children for RGB, 16 when alpha participates */

  MagickSizeType
    number_unique;       /* pixels classified exactly at this node (n2) */

  DoublePixelPacket
    total_color;         /* channel sums for pixels classified here */

  double
    quantize_error;      /* accumulated distance-squared error (E) */

  size_t
    color_number,        /* colormap index assigned to this node */
    id,                  /* child slot within the parent */
    level;               /* depth in the tree (root = 0) */
} NodeInfo;

/* A slab of NodeInfo structures; slabs are chained for bulk allocation. */
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

/* Working state for one quantization run over an image. */
typedef struct _CubeInfo
{
  NodeInfo
    *root;               /* root of the color octree */

  size_t
    colors,              /* colors currently represented by the tree */
    maximum_colors;      /* target colormap size */

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  DoublePixelPacket
    target;              /* color being matched during assignment */

  double
    distance,            /* best squared distance found so far */
    pruning_threshold,
    next_threshold;

  size_t
    nodes,               /* total nodes allocated */
    free_nodes,          /* nodes remaining in the current slab */
    color_number;        /* colormap index of the best match */

  NodeInfo
    *next_node;

  Nodes
    *node_queue;         /* chain of node slabs for cleanup */

  MemoryInfo
    *memory_info;

  ssize_t
    *cache;              /* memoized color -> colormap-index lookups */

  DoublePixelPacket
    error[ErrorQueueLength];   /* recent dither error terms */

  double
    weights[ErrorQueueLength]; /* dither error weights */

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;     /* include alpha in classification/matching */

  ssize_t
    x,
    y;                   /* current dither position */

  size_t
    depth;               /* current maximum tree depth */

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *,ExceptionInfo *),
  SetGrayscaleImage(Image *,ExceptionInfo *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e   Q u a n t i z e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  /* AcquireCriticalMemory() aborts on failure, so no NULL check is needed. */
  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char
        *option;

      /* Seed dithering from the image-info flag, then let an explicit
         "dither" option override it. */
      quantize_info->dither_method=image_info->dither == MagickFalse ?
        NoDitherMethod : RiemersmaDitherMethod;
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        quantize_info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      quantize_info->measure_error=image_info->verbose;
    }
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1)  A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image;  (2)  A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2  > 0, it divides Sr, Sg, and Sb by n2 .  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally,  the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/* Load one pixel into a DoublePixelPacket, premultiplying RGB by alpha when
   the cube tracks alpha and the pixel is not fully opaque. */
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      /* Fast path: no premultiplication needed. */
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=alpha*GetPixelRed(image,pixel);
  alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}

/* Same as AssociateAlphaPixel() but for a PixelInfo source. */
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=alpha*pixel->red;
  alpha_pixel->green=alpha*pixel->green;
  alpha_pixel->blue=alpha*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}

/* Map a color to the child slot at the given tree level: one bit per channel
   taken from bit `index` of each 8-bit-scaled component (bit 3 = alpha). */
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}

/* Build the output colormap from the pruned tree and remap every pixel to
   its closest colormap entry; returns MagickTrue on success. */
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  if (AcquireImageColormap(image,cube_info->maximum_colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Each thread works on a private copy of the cube state; the shared
           tree itself is only read here. */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Run-length: `count` adjacent identical pixels share one lookup. */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Larger than any possible 4-channel squared distance, so the
             first candidate always wins. */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      /* Force the two-entry colormap to pure black/white, ordered by luma. */
      intensity=GetPixelInfoLuma(image->colormap+0) < QuantumRange/2.0 ?
        0.0 : QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax= 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing It updates the following data for each such node:
%
%    n1 : Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2 : Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially,  n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb : Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth. The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/* Decide whether alpha participates in classification: only when the image
   blends alpha and the request is not a 2-color grayscale reduction. */
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->alpha_trait == BlendPixelTrait ?
    MagickTrue : MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

/* Build the color-description tree for the image.  Runs a first pass at full
   MaxTreeDepth until the color budget is exceeded, then continues with a
   second pass limited to the (possibly pruned) cube_info->depth. */
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    {
      if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
          (cube_info->quantize_info->colorspace != CMYKColorspace))
        (void) TransformImageColorspace((Image *) image,
          cube_info->quantize_info->colorspace,exception);
      else
        if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
          (void) TransformImageColorspace((Image *) image,sRGBColorspace,
            exception);
    }
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  /* First pass: classify at the maximum tree depth. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /* Run-length: adjacent identical pixels are classified once. */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        /* Track the center of the child cube we are descending into. */
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    /* Budget exceeded: prune and switch to the depth-limited second pass. */
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /* Second pass: resume at the next row, classifying only to
     cube_info->depth (note MaxTreeDepth is replaced by cube_info->depth
     below; otherwise this mirrors the first pass). */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (cube_info->quantize_info->colorspace != image->colorspace)
    if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
        (cube_info->quantize_info->colorspace != CMYKColorspace))
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  /* MagickFalse only when a pass aborted before reaching the last row. */
  return(y < (ssize_t) image->rows ?
    MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /* Start from a default-initialized structure so a NULL source yields a
     fresh QuantizeInfo. */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Visit every allocated child node first; the best match found so far is
    carried in cube_info->distance / cube_info->color_number.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha,
        beta,
        delta,
        distance;

      register DoublePixelPacket
        *magick_restrict q;

      register PixelInfo
        *magick_restrict p;

      /*
        Compare this leaf's colormap entry against the target color; bail
        out as soon as the accumulated squared distance exceeds the best
        match found so far (early return is safe: nothing follows this
        block in the function).
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /*
            Alpha-weighted comparison: scale each color by its opacity.
          */
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      delta=alpha*p->red-beta*q->red;
      distance=delta*delta;
      if (distance > cube_info->distance)
        return;
      delta=alpha*p->green-beta*q->green;
      distance+=delta*delta;
      if (distance > cube_info->distance)
        return;
      delta=alpha*p->blue-beta*q->blue;
      distance+=delta*delta;
      if (distance > cube_info->distance)
        return;
      if (cube_info->associate_alpha != MagickFalse)
        {
          delta=p->alpha-q->alpha;
          distance+=delta*delta;
        }
      if (distance <= cube_info->distance)
        {
          cube_info->distance=distance;
          cube_info->color_number=node_info->color_number;
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette (PseudoClass, opaque) images can be compressed this way.
  */
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize with the current number of colors and maximum tree depth;
    QuantizeImage() rebuilds the colormap, dropping duplicate/unused entries.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  of unique colors is not zero.  DefineImageColormap() returns the number of
%  colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color holds QuantumScale-normalized sums, so multiplying by
        1/number_unique (alpha) and QuantumRange recovers the mean quantum.
      */
      q=image->colormap+image->colors;
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          /*
            No alpha channel: mean RGB, fully opaque entry.
          */
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /*
                Partially transparent entry: un-premultiply the mean color
                by the mean alpha (gamma = 1/alpha') to avoid darkening.
              */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /*
                Track the most-populated transparent entry for later use.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *next_pool;

  /*
    Walk the queue of node pools: release each pool's node array, then the
    pool record itself, until the queue is exhausted.
  */
  do
  {
    next_pool=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next_pool;
  } while (cube_info->node_queue != (Nodes *) NULL);
  /*
    Release the dither cache (if any), the cloned quantize info, and
    finally the cube itself.
  */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /*
    Invalidate the signature before releasing the structure so that any
    stale pointer trips the signature assertion if it is reused.
  */
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Release the per-thread error-diffusion scanline buffers allocated by
  AcquirePixelThreadSet().  Safe on a partially-populated set: entries are
  NULL-checked before release.
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one scanline-pair buffer per worker thread (2*count entries:
  the "current" and "previous" rows used by the serpentine error
  diffusion).  Returns NULL on allocation failure, releasing any buffers
  already acquired.
*/
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  /* Zero the pointer table first so a failure mid-loop can be cleaned up. */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Map a pixel color to its index in the dither cache by packing the
  high (8-CacheShift) bits of each channel into one integer; the alpha
  bits are included only when the cube associates alpha.
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}

/*
  Serpentine-scan Floyd-Steinberg error diffusion over the whole image.
*/
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;
DoublePixelPacket **pixels; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; amount=1.0; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) amount=StringToDoubleInterval(artifact,1.0); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register Quantum *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? 
(ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*amount*current[u-v].red/16; pixel.green+=7.0*amount*current[u-v].green/16; pixel.blue+=7.0*amount*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*amount*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=previous[u+v].alpha/16; } pixel.red+=5.0*amount*previous[u].red/16; pixel.green+=5.0*amount*previous[u].green/16; pixel.blue+=5.0*amount*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*amount*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*amount*previous[u-v].red/16; pixel.green+=3.0*amount*previous[u-v].green/16; pixel.blue+=3.0*amount*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*amount*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int, ExceptionInfo *); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction,ExceptionInfo *exception) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) 
RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); 
Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { register Quantum *magick_restrict q; register ssize_t i; /* Distribute error. 
*/ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=p->weights[i]*p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. 
*/ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. 
*/ (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors. % */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; double sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. 
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) memset(cube_info,0,sizeof(*cube_info));
  /* Clamp the requested tree depth into [2, MaxTreeDepth]. */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info leaks here -- confirm */
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither_method == NoDitherMethod)
    return(cube_info);
  /*
    Initialize dither resources.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
  if (cube_info->memory_info == (MemoryInfo *) NULL)
    return((CubeInfo *) NULL);  /* NOTE(review): cube_info/quantize_info leak here -- confirm */
  cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
  /*
    Initialize color cache.  Filling with byte 0xFF yields -1 in every
    ssize_t slot (two's complement), i.e. "not yet resolved".
  */
  (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors.
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  /* Fold any rounding residue into the first weight so the sum is 1.0. */
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t N o d e I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Fix: the pool record is not yet linked into node_queue, so it
            must be released here or it leaks on this failure path.
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Hand out the next node from the head pool; zero it and wire it into
    the tree.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
% % o normalized_mean_square_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_square_error: Thsi value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in your image. % % The format of the GetImageQuantizeError method is: % % MagickBooleanType GetImageQuantizeError(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image, ExceptionInfo *exception) { CacheView *image_view; double alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; ssize_t index, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,exception); (void) memset(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) GetPixelIndex(image,p); if (image->alpha_trait == BlendPixelTrait) { alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); 
beta=(double) (QuantumScale*image->colormap[index].alpha); } distance=fabs((double) (alpha*GetPixelRed(image,p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. 
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Zero everything, then install the library defaults: 256 colors,
    Riemersma dithering, no target colorspace, no error measurement.
  */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->measure_error=MagickFalse;
  quantize_info->number_colors=256;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   K m e a n s I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KmeansImage() applies k-means color reduction to an image.  This is a
%  colorspace clustering or segmentation technique.
%
%  The format of the KmeansImage method is:
%
%      MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
%        const size_t max_iterations,const double tolerance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_colors: number of colors to use as seeds.
%
%    o max_iterations: maximum number of iterations while converging.
%
%    o tolerance: the maximum tolerance.
%
%    o exception: return any errors or warnings in this structure.
% */ typedef struct _KmeansInfo { double red, green, blue, alpha, black, count, distortion; } KmeansInfo; static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info) { register ssize_t i; assert(kmeans_info != (KmeansInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (kmeans_info[i] != (KmeansInfo *) NULL) kmeans_info[i]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[i]); kmeans_info=(KmeansInfo **) RelinquishMagickMemory(kmeans_info); return(kmeans_info); } static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors) { KmeansInfo **kmeans_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads, sizeof(*kmeans_info)); if (kmeans_info == (KmeansInfo **) NULL) return((KmeansInfo **) NULL); (void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info)); for (i=0; i < (ssize_t) number_threads; i++) { kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors, sizeof(**kmeans_info)); if (kmeans_info[i] == (KmeansInfo *) NULL) return(DestroyKmeansThreadSet(kmeans_info)); } return(kmeans_info); } static inline double KmeansMetric(const Image *magick_restrict image, const Quantum *magick_restrict p,const PixelInfo *magick_restrict q) { register double gamma, metric, pixel; gamma=1.0; metric=0.0; if ((image->alpha_trait != UndefinedPixelTrait) || (q->alpha_trait != UndefinedPixelTrait)) { pixel=GetPixelAlpha(image,p)-(q->alpha_trait != UndefinedPixelTrait ? 
q->alpha : OpaqueAlpha); metric+=pixel*pixel; if (image->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*GetPixelAlpha(image,p); if (q->alpha_trait != UndefinedPixelTrait) gamma*=QuantumScale*q->alpha; } if (image->colorspace == CMYKColorspace) { pixel=QuantumScale*(GetPixelBlack(image,p)-q->black); metric+=gamma*pixel*pixel; gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p)); gamma*=QuantumScale*(QuantumRange-q->black); } metric*=3.0; pixel=QuantumScale*(GetPixelRed(image,p)-q->red); if (IsHueCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs((double) pixel) > 0.5) pixel-=0.5; pixel*=2.0; } metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelGreen(image,p)-q->green); metric+=gamma*pixel*pixel; pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue); metric+=gamma*pixel*pixel; return(metric); } MagickExport MagickBooleanType KmeansImage(Image *image, const size_t number_colors,const size_t max_iterations,const double tolerance, ExceptionInfo *exception) { #define KmeansImageTag "Kmeans/Image" #define RandomColorComponent(info) (QuantumRange*GetPseudoRandomValue(info)) CacheView *image_view; const char *colors; double previous_tolerance; KmeansInfo **kmeans_pixels; MagickBooleanType verbose, status; register ssize_t n; size_t number_threads; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colors=GetImageArtifact(image,"kmeans:seed-colors"); if (colors == (const char *) NULL) { CubeInfo *cube_info; QuantizeInfo *quantize_info; size_t colors, depth; /* Seed clusters from color quantization. 
*/ quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->colorspace=image->colorspace; quantize_info->number_colors=number_colors; quantize_info->dither_method=NoDitherMethod; colors=number_colors; for (depth=1; colors != 0; depth++) colors>>=2; cube_info=GetCubeInfo(quantize_info,depth,number_colors); if (cube_info == (CubeInfo *) NULL) { quantize_info=DestroyQuantizeInfo(quantize_info); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AcquireImageColormap(image,number_colors,exception); if (status != MagickFalse) { image->colors=0; (void) DefineImageColormap(image,cube_info,cube_info->root); } } DestroyCubeInfo(cube_info); quantize_info=DestroyQuantizeInfo(quantize_info); if (status == MagickFalse) return(status); } else { char color[MagickPathExtent]; register const char *p; /* Seed clusters from color list (e.g. red;green;blue). */ status=AcquireImageColormap(image,number_colors,exception); if (status == MagickFalse) return(status); for (n=0, p=colors; n < (ssize_t) image->colors; n++) { register const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,image->colormap+n, exception); if (*q == '\0') { n++; break; } p=q+1; } if (n < (ssize_t) image->colors) { RandomInfo *random_info; /* Seed clusters from random values. 
*/ random_info=AcquireRandomInfo(); for ( ; n < (ssize_t) image->colors; n++) { (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n, exception); image->colormap[n].red=RandomColorComponent(random_info); image->colormap[n].green=RandomColorComponent(random_info); image->colormap[n].blue=RandomColorComponent(random_info); if (image->alpha_trait != BlendPixelTrait) image->colormap[n].alpha=RandomColorComponent(random_info); if (image->colorspace == CMYKColorspace) image->colormap[n].black=RandomColorComponent(random_info); } random_info=DestroyRandomInfo(random_info); } } /* Iterative refinement. */ kmeans_pixels=AcquireKmeansThreadSet(number_colors); if (kmeans_pixels == (KmeansInfo **) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); previous_tolerance=0.0; verbose=IsStringTrue(GetImageArtifact(image,"debug")); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); image_view=AcquireAuthenticCacheView(image,exception); for (n=0; n < (ssize_t) max_iterations; n++) { double distortion; register ssize_t i; ssize_t y; for (i=0; i < (ssize_t) number_threads; i++) (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i])); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double min_distance; register ssize_t i; ssize_t j; /* Assign each pixel whose mean has the least squared color distance. 
*/ j=0; min_distance=KmeansMetric(image,q,image->colormap+0); for (i=1; i < (ssize_t) image->colors; i++) { double distance; if (min_distance <= MagickEpsilon) break; distance=KmeansMetric(image,q,image->colormap+i); if (distance < min_distance) { min_distance=distance; j=i; } } kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q); kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q); kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q); if (image->alpha_trait != BlendPixelTrait) kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q); if (image->colorspace == CMYKColorspace) kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q); kmeans_pixels[id][j].count++; kmeans_pixels[id][j].distortion+=min_distance; SetPixelIndex(image,(Quantum) j,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } if (status == MagickFalse) break; /* Reduce sums to [0] entry. */ for (i=1; i < (ssize_t) number_threads; i++) { register ssize_t j; for (j=0; j < (ssize_t) image->colors; j++) { kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red; kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green; kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue; if (image->alpha_trait != BlendPixelTrait) kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha; if (image->colorspace == CMYKColorspace) kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black; kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count; kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion; } } /* Calculate the new means (centroids) of the pixels in the new clusters. 
*/ distortion=0.0; for (i=0; i < (ssize_t) image->colors; i++) { double gamma; gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count); image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red; image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green; image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue; if (image->alpha_trait != BlendPixelTrait) image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha; if (image->colorspace == CMYKColorspace) image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black; distortion+=kmeans_pixels[0][i].distortion; } if (verbose != MagickFalse) (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n, GetMagickPrecision(),distortion,GetMagickPrecision(), fabs(distortion-previous_tolerance)); if (fabs(distortion-previous_tolerance) <= tolerance) break; previous_tolerance=distortion; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n, max_iterations); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels); if (image->progress_monitor != (MagickProgressMonitor) NULL) (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType) max_iterations-1,max_iterations); if (status == MagickFalse) return(status); return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. 
% % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \ MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. 
*/ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait == BlendPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { 
MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneSubtree method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); if (cube_info->nodes > cube_info->maximum_colors) { /* Merge color statistics into parent. 
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.alpha+=node_info->total_color.alpha; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. 
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    child;

  size_t
    number_children;

  /*
    Recurse into each populated child first so the tree is pruned from the
    leaves upward, then prune this node itself if it lies deeper than the
    cube's configured depth.
  */
  number_children=(cube_info->associate_alpha == MagickFalse) ? 8UL : 16UL;
  child=0;
  while (child < (ssize_t) number_children)
  {
    if (node_info->child[child] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[child]);
    child++;
  }
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->alpha_trait != BlendPixelTrait) { if (SetImageGray(image,exception) != MagickFalse) (void) SetGrayscaleImage(image,exception); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait == BlendPixelTrait) && (depth > 5)) depth--; if (SetImageGray(image,exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. 
*/ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. 
*/ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. 
*/ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. % % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. 
On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image. At the
%  beginning of reduction, n2 = 0 for all nodes except at the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2 pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  qsort() comparator for quantization errors: orders ascending; two errors
  within MagickEpsilon of each other compare equal.
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  double
    *p,
    *q;

  p=(double *) error_p;
  q=(double *) error_q;
  if (*p > *q)
    return(1);
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(-1);
}

/*
  Repeatedly prune the octree until the number of representative colors is
  within cube_info->maximum_colors, reporting progress as it goes.
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;

      /*
        Enable rapid reduction of the number of unique colors: flatten the
        per-node quantization errors into a vector, sort it, and seed the
        first pruning threshold from it.  The 110/100 arithmetic leaves
        roughly 10% slack above the requested maximum so the loop below
        converges in few passes.  If the allocation fails this seeding is
        simply skipped; the loop still reduces correctly, only more slowly.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  /*
    Each pass prunes all nodes whose error is at or below the current
    threshold; Reduce() recounts the surviving colors and records the
    smallest remaining error, which becomes the next pass's threshold.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImage() replaces the colors of an image with the closest of the colors
%  from the reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The cube is built to full tree depth; nothing is allocated yet if
    GetCubeInfo() fails, so the macro's early return leaks nothing.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Populate the cube from the reference image's colors, then assign each
    pixel of the target image to its closest reference color.
  */
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
% % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. 
*/ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); if (intensity < (double) INT_MIN) intensity=(double) INT_MIN; if (intensity > (double) INT_MAX) intensity=(double) INT_MAX; return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; register ssize_t i; size_t extent; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1)); colormap_index=(ssize_t *) AcquireQuantumMemory(extent, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) 
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) 
AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); }
Pattern.h
/***************************************************************************** * * Copyright (c) 2003-2020 by The University of Queensland * http://www.uq.edu.au * * Primary Business: Queensland, Australia * Licensed under the Apache License, version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 * * Development until 2012 by Earth Systems Science Computational Center (ESSCC) * Development 2012-2013 by School of Earth Sciences * Development from 2014-2017 by Centre for Geoscience Computing (GeoComp) * Development from 2019 by School of Earth and Environmental Sciences ** *****************************************************************************/ /****************************************************************************/ /* Paso: CSC/CSR sparse matrix pattern */ /****************************************************************************/ /* Author: Lutz Gross, l.gross@uq.edu.au */ /****************************************************************************/ #ifndef __PASO_PATTERN_H__ #define __PASO_PATTERN_H__ #include "Paso.h" #include "PasoException.h" #include <escript/IndexList.h> namespace paso { struct Pattern; typedef boost::shared_ptr<Pattern> Pattern_ptr; typedef boost::shared_ptr<const Pattern> const_Pattern_ptr; struct PASO_DLL_API Pattern : boost::enable_shared_from_this<Pattern> { Pattern(int type, dim_t numOutput, dim_t numInput, index_t* ptr, index_t* index); ~Pattern(); Pattern_ptr unrollBlocks(int newType, dim_t outputBlockSize, dim_t inputBlockSize); Pattern_ptr getSubpattern(dim_t newNumRows, dim_t newNumCols, const index_t* rowList, const index_t* newColIndex) const; /// Searches for a maximal independent set MIS in the matrix pattern void mis(index_t* mis_marker) const; void reduceBandwidth(index_t* oldToNew); Pattern_ptr multiply(int type, const_Pattern_ptr other) const; Pattern_ptr binop(int type, const_Pattern_ptr other) const; index_t* borrowMainDiagonalPointer(); static Pattern_ptr fromIndexListArray(dim_t n0, dim_t n, const 
escript::IndexList* index_list_array, index_t range_min, index_t range_max, index_t index_offset); index_t* borrowColoringPointer(); dim_t getBandwidth(index_t* label) const; inline bool isEmpty() const { return (!ptr && !index); } inline dim_t getNumColors() { // make sure numColors is defined borrowColoringPointer(); return numColors; } inline dim_t maxDeg() const { dim_t deg = 0; #pragma omp parallel { dim_t loc_deg=0; #pragma omp for for (dim_t i = 0; i < numInput; ++i) { loc_deg=std::max(loc_deg, ptr[i+1]-ptr[i]); } #pragma omp critical { deg = std::max(deg, loc_deg); } } return deg; } // convert csr row ptr and col indices to harwell-boeing format inline void csrToHB() { // TODO: add openmp if (! (type & (MATRIX_FORMAT_OFFSET1 + MATRIX_FORMAT_BLK1)) ) { throw PasoException( "Paso: Harwell-Boeing format requires CSR format with index offset 1 and block size 1."); } if ( !(hb_row == NULL && hb_col == NULL) ) { return; } hb_row = new index_t[len]; hb_col = new index_t[len]; for (dim_t i=0, k=0; i<numOutput; i++) { for (dim_t j=ptr[i]; j<ptr[i+1]; j++, k++) { hb_row[k] = i+1; hb_col[k] = index[j-1]; } } } int type; // Number of rows in the ptr array [CSR] / number of cols for CSC dim_t numOutput; // Number of cols [CSR] dim_t numInput; // number of non-zeros dim_t len; // ptr[n] to ptr[n+1] lists indices (in index) of non-zeros in row n index_t* ptr; // Non-major indices of non-zeros (in CSR this will be col numbers) index_t* index; // pointer to main diagonal entry index_t* main_iptr; // number of colors dim_t numColors; // coloring index: inputs with the same color are not connected index_t* coloring; // row indices in harwell-boeing format index_t* hb_row; // col indices in harwell-boeing format index_t* hb_col; }; } // namespace paso #endif // __PASO_PATTERN_H__
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include 
"MagickCore/xwindow-private.h" /* Constant declaration. */ const char AlphaColor[] = "#bdbdbd", /* gray */ BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireMagickMemory(sizeof(*image)); if (image == (Image *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image,0,sizeof(*image)); /* Initialize Image structure. 
*/ (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; (void) QueryColorCompliance(AlphaColor,AllCompliance,&image->alpha_color, exception); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image->transparent_color,exception); GetTimerInfo(&image->timer); image->cache=AcquirePixelCache(0); image->channel_mask=DefaultChannels; image->channel_map=AcquirePixelChannelMap(); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=time((time_t *) NULL); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AcquireSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->alpha_color=image_info->alpha_color; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if 
(image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); /* Set all global options that map to per-image settings. */ (void) SyncImageSettings(image_info,image,exception); /* Global options that are only set for new images. */ option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info)); if (image_info == (ImageInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { /* Allocate image structure. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info,exception); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MagickPathExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MagickPathExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting effects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; MagickBooleanType status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; register const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended area. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); alpha_trait=images->alpha_trait; number_images=1; width=images->columns; height=images->rows; depth=images->depth; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. 
*/ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse) { append_image=DestroyImage(append_image); return((Image *) NULL); } append_image->depth=depth; append_image->alpha_trait=alpha_trait; (void) SetImageBackgroundColor(append_image,exception); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(next,&pixel); for (x=0; x < (ssize_t) next->columns; x++) { if (GetPixelReadMask(next,p) == 0) { SetPixelBackgoundColor(append_image,q); p+=GetPixelChannels(next); q+=GetPixelChannels(append_image); continue; } GetPixelInfoPixel(next,p,&pixel); SetPixelViaPixelInfo(append_image,&pixel,q); p+=GetPixelChannels(next); q+=GetPixelChannels(append_image); } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); 
if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == MagickFalse) break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns if no exceptions are found in the image % sequence, otherwise it determines the most severe exception and reports % it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based any clipping path information % if it exists. 
% % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if 
(SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,ReadPixelMask,clip_mask,exception); clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows is 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType orphan, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { Image *clone_image; double scale; size_t length; /* Clone the image. 
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
        "NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Allocate a zeroed Image structure and copy the scalar members.
  */
  clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
  if (clone_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->number_channels=image->number_channels;
  clone_image->number_meta_channels=image->number_meta_channels;
  clone_image->metacontent_extent=image->metacontent_extent;
  clone_image->colorspace=image->colorspace;
  clone_image->read_mask=image->read_mask;
  clone_image->write_mask=image->write_mask;
  clone_image->alpha_trait=image->alpha_trait;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  if (image->colormap != (PixelInfo *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelInfo *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  /*
    Deep-clone the auxiliary structures: image info, profiles, properties,
    artifacts, and the pixel channel map.
  */
  clone_image->image_info=CloneImageInfo(image->image_info);
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  clone_image->channel_mask=image->channel_mask;
  clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MagickPathExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,
    MagickPathExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /*
        Detached clone: unlink from the parent list and I/O stream.
      */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AcquireSemaphoreInfo();
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy requested: share the pixel cache by reference.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    Resized clone: scale the page geometry and tile offsets proportionally;
    pixel data is left undefined for the caller to initialize.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
    clone_image=DestroyImage(clone_image);
  return(clone_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageInfo() makes a copy of the given image info structure.  If
%  NULL is specified, a new image info structure is created initialized to
%  default values.
%
%  The format of the CloneImageInfo method is:
%
%      ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    Start from a default-initialized ImageInfo; with a NULL source this is
    the whole result.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    String members are deep-copied with CloneString().
  */
  (void) CloneString(&clone_info->size,image_info->size);
  (void) CloneString(&clone_info->extract,image_info->extract);
  (void) CloneString(&clone_info->scenes,image_info->scenes);
  (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
  (void) CloneString(&clone_info->server_name,image_info->server_name);
  (void) CloneString(&clone_info->font,image_info->font);
  (void) CloneString(&clone_info->texture,image_info->texture);
  (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->alpha_color=image_info->alpha_color;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference; the profile is deep-copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  (void) CopyMagickString(clone_info->magick,image_info->magick,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,
    MagickPathExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MagickPathExtent);
  clone_info->channel=image_info->channel;
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
%        const RectangleInfo *geometry,const OffsetInfo *offset,
%        ExceptionInfo *exception);
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag  "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    Reject a destination rectangle that falls outside the destination image.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,source_image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels that are defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (source_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(image,channel,p[i],q);
      }
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImage)
#endif
        proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  /*
    Decrement the reference count under the image semaphore; only the last
    reference actually frees the structure.
  */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
*/
  DestroyImagePixels(image);
  image->channel_map=DestroyPixelChannelMap(image->channel_map);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelInfo *) NULL)
    image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info *) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  DestroyBlob(image);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&image->semaphore);
  /*
    Invalidate the signature so stale pointers fail the assert checks.
  */
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Free every owned string member, then the cache reference and profile.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  DestroyImageOptions(image_info);
  /*
    Invalidate the signature before releasing the structure itself.
  */
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if the
%  blob of the specified image is referenced by other images.
If the reference
%  count is higher than 1 a new blob is assigned to the specified image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Delegate the blob bookkeeping to the blob module.
  */
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfo() initializes image_info to default values.
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment variable if set.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default colors; query errors are discarded with the local
    exception.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorCompliance(AlphaColor,AllCompliance,&image_info->alpha_color,
    exception);
  (void) QueryColorCompliance(BackgroundColor,AllCompliance,
    &image_info->background_color,exception);
  (void) QueryColorCompliance(BorderColor,AllCompliance,
    &image_info->border_color,exception);
  (void) QueryColorCompliance(TransparentColor,AllCompliance,
    &image_info->transparent_color,exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  return(image_info->file);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMask() returns the mask associated with the image.
%
%  The format of the GetImageMask method is:
%
%      Image *GetImageMask(const Image *image,const PixelMask type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
*/
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
  ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  Image
    *mask_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Get image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Build a detached grayscale clone; each pixel receives the mask value.
  */
  mask_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (mask_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  mask_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(mask_image,GRAYColorspace,exception);
  mask_image->read_mask=MagickFalse;
  image_view=AcquireVirtualCacheView(image,exception);
  mask_view=AcquireAuthenticCacheView(mask_image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
          break;
        }
        default:
        {
          /* any other type falls back to the read mask */
          SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
          break;
        }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mask_image);
    }
    if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    mask_image=DestroyImage(mask_image);
  return(mask_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e R e f e r e n c e C o u n t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageReferenceCount() returns the image reference count.
%
%  The format of the GetReferenceCount method is:
%
%      ssize_t GetImageReferenceCount(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    reference_count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Read the count under the image semaphore for a consistent snapshot.
  */
  LockSemaphoreInfo(image->semaphore);
  reference_count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(reference_count);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
%  image.  A virtual pixel is any pixel access that is outside the boundaries
%  of the image cache.
%
%  The format of the GetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The method is stored with the pixel cache, not the Image structure.
  */
  return(GetPixelCacheVirtualMethod(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p r e t I m a g e F i l e n a m e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpretImageFilename() interprets embedded characters in an image
%  filename.  The filename length is returned.
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
%        const char *format,int value,char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info..
%
%    o image: the image.
%
%    o format:  A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value:  Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename,
  ExceptionInfo *exception)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MagickPathExtent);
  /*
    Scan the format for '%' escapes; %d/%o/%x substitute the numeric value,
    %[filename:...] substitutes an image property/artifact/option.
  */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* literal "%%": skip the escaped percent */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          foo;

        /* skip a zero-padding width specifier (e.g. %03d) */
        foo=(ssize_t) strtol(q,&q,10);
        (void) foo;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        q++;
        /*
          Temporarily terminate at the conversion character so the %d/%o/%x
          specifier can be formatted in place, then re-append the tail.
        */
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t)
          (MagickPathExtent-(p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MagickPathExtent];

        const char
          *option;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        /* FUTURE: Compare update with code from InterpretImageProperties()
           Note that a 'filename:' property should not need depth recursion.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        /*
          Extract the bracketed pattern, honoring nested brackets.
        */
        depth=1;
        r=q+1;
        for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        /*
          Resolve the pattern: image property first, then artifact, then
          image-info option.
        */
        option=(const char *) NULL;
        if (image != (Image *) NULL)
          option=GetImageProperty(image,pattern,exception);
        if ((option == (const char *) NULL) && (image != (Image *) NULL))
          option=GetImageArtifact(image,pattern);
        if ((option == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          option=GetImageOption(image_info,pattern);
        if (option == (const char *) NULL)
          break;
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),option,(size_t)
          (MagickPathExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    Collapse any remaining "%%" pairs to a single '%'.
  */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MagickPathExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /* Without HDRI support every pixel is an integer within quantum range. */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every sample is an in-range integer; it is
    flipped to MagickFalse as soon as an HDR sample (or cache error) is seen.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelReadMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelTrait
          traits;

        traits=GetPixelChannelTraits(image,(PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        pixel=(double) p[i];
        /* out of range or fractional => high dynamic range sample */
        if ((pixel < 0.0) || (pixel > QuantumRange) ||
            (pixel != (double) ((QuantumAny) pixel)))
          break;
      }
      p+=GetPixelChannels(image);
      /* the inner loop broke early iff an HDR sample was found */
      if (i < (ssize_t) GetPixelChannels(image))
        status=MagickFalse;
    }
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* invert: status==MagickFalse means an HDR sample was found */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *next;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Walk the image list; every member must carry a valid core signature.
  */
  next=image;
  while (next != (Image *) NULL)
  {
    if (next->signature != MagickCoreSignature)
      return(MagickFalse);
    next=GetNextImageInList(next);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue any pixel in the image has been altered
%  since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    magick[MagickPathExtent],
    filename[MagickPathExtent];

  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /*
    Snapshot the first image's magick and filename; any list member that
    differs (or is explicitly tainted) marks the sequence as altered.
  */
  (void) CopyMagickString(magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->taint != MagickFalse)
      return(MagickTrue);
    if (LocaleCompare(p->magick,magick) != 0)
      return(MagickTrue);
    if (LocaleCompare(p->filename,filename) != 0)
      return(MagickTrue);
  }
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModifyImage() ensures that there is only a single reference to the image
%  to be modified, updating the provided image pointer to point to a clone of
%  the original image if necessary.
%
%  The format of the ModifyImage method is:
%
%      MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    {
      /*
        FIX: the original code dropped a reference and set *image to NULL
        (while still returning MagickTrue) when the clone failed; keep the
        caller's image intact and report the failure instead.
      */
      return(MagickFalse);
    }
  /*
    Release our reference to the shared image and hand the caller the
    private clone.
  */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w M a g i c k I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewMagickImage() creates a blank image canvas of the specified size and
%  background color.
%
%  The format of the NewMagickImage method is:
%
%      Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
%        const size_t height,const PixelInfo *background,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the image width.
%
%    o height: the image height.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const PixelInfo *background,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickCoreSignature);
  assert(background != (const PixelInfo *) NULL);
  /*
    Allocate the canvas and inherit the pixel traits of the background color
    so SetPixelViaPixelInfo() below stores it faithfully.
  */
  image=AcquireImage(image_info,exception);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->alpha_trait=background->alpha_trait;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure in any thread short-circuits the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Flood the row with the background color, one pixel at a time. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /* On any row failure the partially-initialized canvas is released. */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e f e r e n c e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferenceImage() increments the reference count associated with an image
%  returning a pointer to the image.
%
%  The format of the ReferenceImage method is:
%
%      Image *ReferenceImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* The semaphore guards the shared reference count. */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t I m a g e P a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImagePage() resets the image page canvas and position.
%
%  The format of the ResetImagePage method is:
%
%      MagickBooleanType ResetImagePage(Image *image,const char *page)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* `flags` records which geometry components the page string supplied. */
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /* A width with no height makes the page square. */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /* Aspect form: offsets are relative adjustments to the current page. */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /*
        Absolute offsets; a positive offset with no page size yet grows the
        page to hold the image at that offset.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e B a c k g r o u n d C o l o r                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageBackgroundColor() initializes the image pixels to the image
%  background color.  The background color is defined by the background_color
%  member of the image structure.
%
%  The format of the SetImage method is:
%
%      MagickBooleanType SetImageBackgroundColor(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Convert the stored background color into this image's pixel space. */
  ConformPixelInfo(image,&image->background_color,&background,exception);
  /*
    Set image background color.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  /*
    NOTE(review): unlike SetImageColor()/SetImageAlpha(), this row loop has no
    OpenMP pragma — confirm whether serial execution here is intentional.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,&background,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C h a n n e l M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannelMask() sets the image channel mask from the specified channel
%  mask.
%
%  The format of the SetImageChannelMask method is:
%
%      ChannelType SetImageChannelMask(Image *image,
%        const ChannelType channel_mask)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel_mask: the channel mask.
%
*/
MagickExport ChannelType SetImageChannelMask(Image *image,
  const ChannelType channel_mask)
{
  /* Thin wrapper: the pixel subsystem owns the channel mask. */
  return(SetPixelChannelMask(image,channel_mask));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() set the entire image canvas to the specified color.
%
%  The format of the SetImageColor method is:
%
%      MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o background: the image color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const PixelInfo *color,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  assert(color != (const PixelInfo *) NULL);
  /* Adopt the fill color's traits so the stored pixels round-trip exactly. */
  image->colorspace=color->colorspace;
  image->alpha_trait=color->alpha_trait;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure in any thread short-circuits the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(image,color,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e S t o r a g e C l a s s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageStorageClass() sets the image class: DirectClass for true color
%  images or PseudoClass for colormapped images.
%
%  The format of the SetImageStorageClass method is:
%
%      MagickBooleanType SetImageStorageClass(Image *image,
%        const ClassType storage_class,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o storage_class: The image class.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class,ExceptionInfo *exception)
{
  /* Record the class, then let the pixel cache re-validate itself. */
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e E x t e n t                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageExtent() sets the image size (i.e. columns & rows).
%
%  The format of the SetImageExtent method is:
%
%      MagickBooleanType SetImageExtent(Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: The image width in pixels.
%
%    o rows: The image height in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  if ((columns == 0) || (rows == 0))
    ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
  image->columns=columns;
  image->rows=rows;
  /* Depths beyond the widest supported quantum type cannot be represented. */
  if (image->depth > (8*sizeof(MagickSizeType)))
    ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
  return(SyncImagePixelCache(image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t I m a g e I n f o                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
%  It is set to a type of image format based on the prefix or suffix of the
%  filename.  For example, 'ps:image' returns PS indicating a Postscript image.
%  JPEG is returned for this filename: 'image.jpg'.  The filename prefix has
%  precendence over the suffix.  Use an optional index enclosed in brackets
%  after a file name to specify a desired scene of a multi-resolution image
%  format like Photo CD (e.g. img0001.pcd[4]).  A True (non-zero) return value
%  indicates success.
%
%  The format of the SetImageInfo method is:
%
%      MagickBooleanType SetImageInfo(ImageInfo *image_info,
%        const unsigned int frames,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o frames: the number of images you intend to write.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    component[MagickPathExtent],
    magic[MagickPathExtent],
    *q;

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *component='\0';
  GetPathComponent(image_info->filename,SubimagePath,component);
  if (*component != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
        {
          if (IsGeometry(component) != MagickFalse)
            (void) CloneString(&image_info->extract,component);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene ranges ("a-b,c,...") into
            scene (lowest) and number_scenes (count spanning the range).
          */
          (void) CloneString(&image_info->scenes,component);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
        }
    }
  *component='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* Strip a compression suffix (e.g. image.gif.gz) to reach the real one. */
  if (*component != '\0')
    if ((LocaleCompare(component,"gz") == 0) ||
        (LocaleCompare(component,"Z") == 0) ||
        (LocaleCompare(component,"svgz") == 0) ||
        (LocaleCompare(component,"wmz") == 0))
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*component != '\0')
    if (LocaleCompare(component,"bz2") == 0)
      {
        char
          path[MagickPathExtent];

        (void) CopyMagickString(path,image_info->filename,MagickPathExtent);
        path[strlen(path)-strlen(component)-1]='\0';
        GetPathComponent(path,ExtensionPath,component);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*component != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE",
          "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY",
          "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,component,MagickPathExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,component);
      (void) CopyMagickString(image_info->filename,component,MagickPathExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
          GetPathComponent(image_info->filename,CanonicalPath,component);
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,component,exception);
      if ((LocaleCompare(component,image_info->filename) != 0) &&
          (strchr(component,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      unsigned char
        *magick;

      size_t
        magick_size;

      /*
        Determine the image format from the first few bytes of the file.
      */
      magick_size=GetMagicPatternExtent(exception);
      if (magick_size == 0)
        return(MagickFalse);
      image=AcquireImage(image_info,exception);
      (void) CopyMagickString(image->filename,image_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *component='\0';
          status=ImageToFile(image,component,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,component,MagickPathExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,component,
            MagickPathExtent);
          image_info->temporary=MagickTrue;
        }
      magick=(unsigned char *) AcquireMagickMemory(magick_size);
      if (magick == (unsigned char *) NULL)
        {
          (void) CloseBlob(image);
          image=DestroyImage(image);
          return(MagickFalse);
        }
      (void) ResetMagickMemory(magick,0,magick_size);
      /* Sniff the header bytes, then rewind so a later read starts clean. */
      count=ReadBlob(image,magick_size,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      magick=(unsigned char *) RelinquishMagickMemory(magick);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /*
            Try to use magick_info that was determined earlier by the
            extension
          */
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetMagickUseExtension(magick_info) != MagickFalse) &&
              (LocaleCompare(magick_info->module,GetMagicName(
                magic_info)) == 0))
            (void) CopyMagickString(image_info->magick,magick_info->name,
              MagickPathExtent);
          else
            {
              (void) CopyMagickString(image_info->magick,GetMagicName(
                magic_info),MagickPathExtent);
              magick_info=GetMagickInfo(image_info->magick,sans_exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o B l o b                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Stores the pointer only; the caller retains ownership of the blob. */
  image_info->blob=(void *) blob;
  image_info->length=length;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoFile() sets the image info file member.
%
%  The format of the SetImageInfoFile method is:
%
%      void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /* Stores the stream handle only; the caller retains ownership. */
  image_info->file=file;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M a s k                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMask() associates a mask with the image.  The mask must be the same
%  dimensions as the image.
%
%  The format of the SetImageMask method is:
%
%      MagickBooleanType SetImageMask(Image *image,const PixelMask type,
%        const Image *mask,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the mask type, ReadPixelMask or WritePixelMask.
%
%    o mask: the image mask.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
  const Image *mask,ExceptionInfo *exception)
{
  CacheView
    *mask_view,
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Set image mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (mask == (const Image *) NULL)
    {
      /* A NULL mask clears the flag of the selected mask type. */
      switch (type)
      {
        case WritePixelMask:
          image->write_mask=MagickFalse;
          break;
        default:
          image->read_mask=MagickFalse;
          break;
      }
      return(SyncImagePixelCache(image,exception));
    }
  switch (type)
  {
    case WritePixelMask:
      image->write_mask=MagickTrue;
      break;
    default:
      image->read_mask=MagickTrue;
      break;
  }
  /* The cache must be re-synced so the mask channel exists before writing. */
  if (SyncImagePixelCache(image,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  mask_view=AcquireVirtualCacheView(mask,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(mask,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity;

      /* The mask channel stores the inverted mask intensity. */
      intensity=GetPixelIntensity(mask,p);
      switch (type)
      {
        case WritePixelMask:
        {
          SetPixelWriteMask(image,ClampToQuantum(QuantumRange-intensity),q);
          break;
        }
        default:
        {
          SetPixelReadMask(image,ClampToQuantum(QuantumRange-intensity),q);
          break;
        }
      }
      p+=GetPixelChannels(mask);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  mask_view=DestroyCacheView(mask_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlpha() sets the alpha levels of the image.
%
%  The format of the SetImageAlpha method is:
%
%      MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o Alpha: the level of transparency: 0 is fully opaque and QuantumRange is
%      fully transparent.
%
*/
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Enable alpha blending so the channel is honored downstream. */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pixels excluded by the read mask are left untouched. */
      if (GetPixelReadMask(image,q) != 0)
        SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e V i r t u a l P i x e l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
%  image and returns the previous setting.  A virtual pixel is any pixel access
%  that is outside the boundaries of the image cache.
%
%  The format of the SetImageVirtualPixelMethod() method is:
%
%      VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* The pixel cache owns the setting; delegate and return the prior value. */
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S m u s h I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SmushImages() takes all images from the current image pointer to the end
%  of the image list and smushes them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o offset: minimum distance in pixels between images.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SmushXGap() measures how far the current image (`images`) may be slid left
  toward its predecessor: for each row it finds the trailing transparent run
  of the left image and the leading transparent run of the right image, and
  keeps the minimum combined width over all rows.  Returns that gap minus
  `offset` (or `offset` itself on early exit — see NOTE below).
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* The first image in the sequence has nothing to smush against. */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* Scan the left image right-to-left for its transparent margin. */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(left_image,p) != TransparentAlpha) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* Scan the right image left-to-right for its transparent margin. */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(right_image,p) != TransparentAlpha) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /*
    NOTE(review): the outer loop has no break, so `y` always equals
    smush_image->rows here and this early return looks unreachable — confirm
    intent.
  */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap() is the vertical analog of SmushXGap(): per column it measures
  the transparent bottom margin of the previous (top) image plus the
  transparent top margin of the current (bottom) image, keeping the minimum
  over all columns.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* Scan the top image bottom-up for its transparent margin. */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(top_image,p) != TransparentAlpha) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* Scan the bottom image top-down for its transparent margin. */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const Quantum *) NULL) ||
          (GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): as in SmushXGap(), this early return looks unreachable. */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    proceed,
    status;

  MagickOffsetType
    n;

  PixelTrait
    alpha_trait;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  alpha_trait=image->alpha_trait;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->alpha_trait != UndefinedPixelTrait)
      alpha_trait=BlendPixelTrait;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Stacking: canvas is max width, summed heights plus offsets. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* Side-by-side: canvas is summed widths plus offsets, max height. */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
    {
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->alpha_trait=alpha_trait;
  (void) SetImageBackgroundColor(smush_image,exception);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /*
      NOTE(review): `status` is overwritten each iteration, so only the last
      composite's result reaches the final check — confirm whether earlier
      failures should also be fatal (e.g. status&=...).
    */
    status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
      y_offset,exception);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* Trim the canvas to the extent actually covered after gap removal. */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) exception;
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* Also instruct the PNG encoder to omit metadata chunks on write. */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,cHRM,EXIF,gAMA,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     S y n c I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  PushColormapIndex() validates a colormap index: an in-range index is
  returned unchanged; an out-of-range index sets *range_exception and is
  clamped to 0 so the caller can keep processing.
*/
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
  MagickBooleanType *range_exception)
{
  if ((size_t) index < image->colors)
    return(index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* only PseudoClass (colormapped) images have indexes to resolve */
  if (image->storage_class == DirectClass)
    return(MagickFalse);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* rewriting pixels from their own indexes does not make the image dirty */
  taint=image->taint;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Threads only ever flip the shared flags to the same value, so the
    unsynchronized writes below are tolerated (benign race by design).
  */
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      index;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* replace each pixel with the colormap entry its index selects */
      index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;  /* restore original taint state */
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs any image_info global options into per-image
%  attributes.
%
%  Note: in IMv6 free form 'options' were always mapped into 'artifacts', so
%  that operations and coders can find such settings.  In IMv7 if a desired
%  per-image artifact is not set, then it will directly look for a global
%  option as a fallback, as such this copy is no longer needed, only the
%  link set up.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncImagesSettings() applies SyncImageSettings() to every image in the
  list, then removes the global "page" option so it is not re-applied to
  images read later.
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images,ExceptionInfo *exception)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image,exception);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

/*
  SyncImageSettings() copies each recognized global option from image_info
  onto the corresponding per-image attribute, then links the image to a
  clone of image_info so unknown options can be looked up lazily.  Always
  returns MagickTrue.
*/
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const char
    *option;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"alpha-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->alpha_color,
      exception);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->background_color,
      exception);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /* rho,sigma geometry = x,y chromaticity; y defaults to x if omitted */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->border_color,
      exception);
  /* FUTURE: do not sync compose to per-image compose setting here */
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  /* -- */
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;  /* shadows the outer geometry_info on purpose */

      flags=ParseGeometry(option,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* an explicit image_info->quality overrides the "quality" option */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
      exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  units=image_info->units;
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  if (units != UndefinedResolution)
    {
      /*
        Convert the stored resolution when the unit system changes
        (2.54 cm per inch; cm->inch is rounded to two decimals).
      */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->resolution.x/=2.54;
                image->resolution.y/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                image->resolution.x=(double) ((size_t) (100.0*2.54*
                  image->resolution.x+0.5))/100.0;
                image->resolution.y=(double) ((size_t) (100.0*2.54*
                  image->resolution.y+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
    }
  option=GetImageOption(image_info,"virtual-pixel");
  if (option != (const char *) NULL)
    (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
      ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
      exception);
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Pointer to allow the lookup of pre-image artifact will fallback to a
    global option setting/define.  This saves a lot of duplication of global
    options into per-image artifacts, while ensuring only specifically set
    per-image artifacts are preserved when parenthesis ends.
  */
  if (image->image_info != (ImageInfo *) NULL)
    image->image_info=DestroyImageInfo(image->image_info);
  image->image_info=CloneImageInfo(image_info);
  return(MagickTrue);
}
pr51360.c
/* PR c/51360 */
/* { dg-do compile } */
/* { dg-options "-Wunused -W -fopenmp" } */

/* Regression test: variables whose only use is inside an OpenMP clause
   (num_threads, if, schedule, final) must not trigger -Wunused warnings,
   for both locals (m, n, o, p) and parameters (a, b, c, d).  The test
   passes when this file compiles with no diagnostics.  */

void
foo (int a, int b, int c, int d)
{
  int m, n, o, p, i;
  m = 6;
  n = 1;
  o = 5;
  p = 1;
  a = 6;
  b = 1;
  c = 5;
  d = 1;
  /* clause-only uses of locals */
  #pragma omp parallel for num_threads (m) if (n) schedule (static, o)
  for (i = 0; i < 10; i++)
    ;
  /* clause-only uses of parameters */
  #pragma omp parallel for num_threads (a) if (b) schedule (static, c)
  for (i = 0; i < 10; i++)
    ;
  #pragma omp task final (p)
    ;
  #pragma omp task final (d)
    ;
}
omp_loop_static.h
// -*- C++ -*- // Copyright (C) 2007, 2008, 2009, 2010 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/omp_loop_static.h * @brief Parallelization of embarrassingly parallel execution by * means of an OpenMP for loop with static scheduling. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H #define _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H 1 #include <omp.h> #include <parallel/settings.h> #include <parallel/basic_iterator.h> namespace __gnu_parallel { /** @brief Embarrassingly parallel algorithm for random access * iterators, using an OpenMP for loop with static scheduling. * * @param __begin Begin iterator of element sequence. * @param __end End iterator of element sequence. * @param __o User-supplied functor (comparator, predicate, adding * functor, ...). * @param __f Functor to @a process an element with __op (depends on * desired functionality, e. g. for std::for_each(), ...). 
* @param __r Functor to @a add a single __result to the already processed * __elements (depends on functionality). * @param __base Base value for reduction. * @param __output Pointer to position where final result is written to * @param __bound Maximum number of elements processed (e. g. for * std::count_n()). * @return User-supplied functor (that may contain a part of the result). */ template<typename _RAIter, typename _Op, typename _Fu, typename _Red, typename _Result> _Op __for_each_template_random_access_omp_loop_static(_RAIter __begin, _RAIter __end, _Op __o, _Fu& __f, _Red __r, _Result __base, _Result& __output, typename std::iterator_traits<_RAIter>::difference_type __bound) { typedef typename std::iterator_traits<_RAIter>::difference_type _DifferenceType; _DifferenceType __length = __end - __begin; _ThreadIndex __num_threads = std::min<_DifferenceType> (__get_max_threads(), __length); _Result *__thread_results; # pragma omp parallel num_threads(__num_threads) { # pragma omp single { __num_threads = omp_get_num_threads(); __thread_results = new _Result[__num_threads]; for (_ThreadIndex __i = 0; __i < __num_threads; ++__i) __thread_results[__i] = _Result(); } _ThreadIndex __iam = omp_get_thread_num(); #pragma omp for schedule(static, _Settings::get().workstealing_chunk_size) for (_DifferenceType __pos = 0; __pos < __length; ++__pos) __thread_results[__iam] = __r(__thread_results[__iam], __f(__o, __begin+__pos)); } //parallel for (_ThreadIndex __i = 0; __i < __num_threads; ++__i) __output = __r(__output, __thread_results[__i]); delete [] __thread_results; // Points to last element processed (needed as return value for // some algorithms like transform). __f.finish_iterator = __begin + __length; return __o; } } // end namespace #endif /* _GLIBCXX_PARALLEL_OMP_LOOP_STATIC_H */
GB_binop__bor_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): documentation fixes belong in the generator template, not in
// this instantiation for the BOR (bitwise-or) operator on uint8_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB)
// A.*B function (eWiseMult):       GB (_AemultB_02__bor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_03__bor_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bor_uint8)
// A*D function (colscale):         GB (_AxD__bor_uint8)
// D*A function (rowscale):         GB (_DxB__bor_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__bor_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__bor_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bor_uint8)
// C=scalar+B                       GB (_bind1st__bor_uint8)
// C=scalar+B'                      GB (_bind1st_tran__bor_uint8)
// C=A+scalar                       GB (_bind2nd__bor_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__bor_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij) | (bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_UINT8 || GxB_NO_BOR_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BOR is none of these, so this variant is compiled out for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bor_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated boilerplate)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__bor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (BOR is commutative, so GB_BINOP_FLIP is 0 in this instantiation.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bor_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bor_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = Bx [p] ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bor_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = Ax [p] ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint8_t aij = Ax [pA] ;             \
    Cx [pC] = (x) | (aij) ;             \
}

GrB_Info GB (_bind1st_tran__bor_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint8_t aij = Ax [pA] ;             \
    Cx [pC] = (aij) | (y) ;             \
}

GrB_Info GB (_bind2nd_tran__bor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_zsyr2k.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_syr2k * * Performs one of the symmetric rank 2k operations * * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C, \f] * or * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C, \f] * * where alpha and beta are scalars, * C is an n-by-n symmetric matrix, and A and B are n-by-k matrices * in the first case and k-by-n matrices in the second case. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f] * - PlasmaTrans: * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f] * * @param[in] n * The order of the matrix C. n >= zero. * * @param[in] k * If trans = PlasmaNoTrans, number of columns of the A and B matrices; * if trans = PlasmaTrans, number of rows of the A and B matrices. * * @param[in] alpha * The scalar alpha. * * @param[in] A * An lda-by-ka matrix. * If trans = PlasmaNoTrans, ka = k; * if trans = PlasmaTrans, ka = n. * * @param[in] lda * The leading dimension of the array A. * If trans = PlasmaNoTrans, lda >= max(1, n); * if trans = PlasmaTrans, lda >= max(1, k). * * @param[in] B * An ldb-by-kb matrix. * If trans = PlasmaNoTrans, kb = k; * if trans = PlasmaTrans, kb = n. * * @param[in] ldb * The leading dimension of the array B. * If trans = PlasmaNoTrans, ldb >= max(1, n); * if trans = PlasmaTrans, ldb >= max(1, k). * * @param[in] beta * The scalar beta. * * @param[in,out] C * An ldc-by-n matrix. 
* On exit, the uplo part of the matrix is overwritten * by the uplo part of the updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1, n). * ******************************************************************************/ __attribute__((weak)) void plasma_core_zsyr2k(plasma_enum_t uplo, plasma_enum_t trans, int n, int k, plasma_complex64_t alpha, const plasma_complex64_t *A, int lda, const plasma_complex64_t *B, int ldb, plasma_complex64_t beta, plasma_complex64_t *C, int ldc) { cblas_zsyr2k(CblasColMajor, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans, n, k, CBLAS_SADDR(alpha), A, lda, B, ldb, CBLAS_SADDR(beta), C, ldc); } /******************************************************************************/ void plasma_core_omp_zsyr2k( plasma_enum_t uplo, plasma_enum_t trans, int n, int k, plasma_complex64_t alpha, const plasma_complex64_t *A, int lda, const plasma_complex64_t *B, int ldb, plasma_complex64_t beta, plasma_complex64_t *C, int ldc, plasma_sequence_t *sequence, plasma_request_t *request) { int ak; int bk; if (trans == PlasmaNoTrans) { ak = k; bk = k; } else { ak = n; bk = n; } #pragma omp task depend(in:A[0:lda*ak]) \ depend(in:B[0:ldb*bk]) \ depend(inout:C[0:ldc*n]) { if (sequence->status == PlasmaSuccess) plasma_core_zsyr2k(uplo, trans, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } }
grid_ao_drv.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 *
 * Evaluation of GTO (Gaussian-type orbital) values on batches of real-space
 * grid points.  BLKSIZE, SIMDD, NPRIMAX, EXPCUTOFF, NCTR_CART and the
 * ATM/BAS slot macros come from grid_ao_drv.h / config.h (not visible here).
 */
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "config.h"
#include "grid_ao_drv.h"

#define MIN(X,Y)        ((X)<(Y)?(X):(Y))
#define MAX(X,Y)        ((X)>(Y)?(X):(Y))

double exp_cephes(double x);
double CINTcommon_fac_sp(int l);

/*
 * Derivative recurrence: f1[i] = i*f0[i-1] - 2*a*f0[i+1] for each Cartesian
 * direction.  Arrays are laid out as [i*SIMDD+n], SIMDD values per angular
 * index i, for 0 <= i <= l.
 */
void GTOnabla1(double *fx1, double *fy1, double *fz1,
               double *fx0, double *fy0, double *fz0, int l, double a)
{
        int i, n;
        double a2 = -2 * a;
        /* i = 0: no lower neighbor, only the -2*a*f0[1] term. */
        for (n = 0; n < SIMDD; n++) {
                fx1[n] = a2*fx0[SIMDD+n];
                fy1[n] = a2*fy0[SIMDD+n];
                fz1[n] = a2*fz0[SIMDD+n];
        }
        for (i = 1; i <= l; i++) {
                for (n = 0; n < SIMDD; n++) {
                        fx1[i*SIMDD+n] = i*fx0[(i-1)*SIMDD+n] + a2*fx0[(i+1)*SIMDD+n];
                        fy1[i*SIMDD+n] = i*fy0[(i-1)*SIMDD+n] + a2*fy0[(i+1)*SIMDD+n];
                        fz1[i*SIMDD+n] = i*fz0[(i-1)*SIMDD+n] + a2*fz0[(i+1)*SIMDD+n];
                }
        }
}

/*
 * r - R_O = (r-R_i) + ri, ri = (x,y,z) = R_i - R_O
 * Reference-point shift of the polynomial factors: f1[i] = ri*f0[i] + f0[i+1].
 */
void GTOx1(double *fx1, double *fy1, double *fz1,
           double *fx0, double *fy0, double *fz0, int l, double *ri)
{
        int i, n;
        for (i = 0; i <= l; i++) {
                for (n = 0; n < SIMDD; n++) {
                        fx1[i*SIMDD+n] = ri[0] * fx0[i*SIMDD+n] + fx0[(i+1)*SIMDD+n];
                        fy1[i*SIMDD+n] = ri[1] * fy0[i*SIMDD+n] + fy0[(i+1)*SIMDD+n];
                        fz1[i*SIMDD+n] = ri[2] * fz0[i*SIMDD+n] + fz0[(i+1)*SIMDD+n];
                }
        }
}

/*
 * Radial part exp(-alpha_j*r^2)*fac of every primitive on the current block
 * of grid points, with magnitude screening against EXPCUTOFF.
 *
 * eprim : output, [nprim, BLKSIZE]
 * coord : grid coordinates relative to the shell center, [3, BLKSIZE]
 *         (x block, then y block, then z block)
 * Returns 1 if any value survived screening, 0 if the whole block is zero.
 */
int GTOprim_exp(double *eprim, double *coord, double *alpha, double *coeff,
                int l, int nprim, int nctr, int ngrids, double fac)
{
        int i, j;
        double arr, maxc;
        double logcoeff[nprim];
        double rr[ngrids];
        double *gridx = coord;
        double *gridy = coord+BLKSIZE;
        double *gridz = coord+BLKSIZE*2;
        int not0 = 0;
        // the maximum value of the coefficients for each pGTO
        for (j = 0; j < nprim; j++) {
                maxc = 0;
                for (i = 0; i < nctr; i++) {
                        maxc = MAX(maxc, fabs(coeff[i*nprim+j]));
                }
                logcoeff[j] = log(maxc);
        }
        /* squared distance of each grid point from the shell center */
        for (i = 0; i < ngrids; i++) {
                rr[i] = gridx[i]*gridx[i] + gridy[i]*gridy[i] + gridz[i]*gridz[i];
        }
        for (j = 0; j < nprim; j++) {
                for (i = 0; i < ngrids; i++) {
                        arr = alpha[j] * rr[i];
                        /* keep only if max|c|*exp(-arr) can exceed exp(-EXPCUTOFF) */
                        if (arr-logcoeff[j] < EXPCUTOFF) {
                                eprim[j*BLKSIZE+i] = exp_cephes(-arr) * fac;
                                not0 = 1;
                        } else {
                                eprim[j*BLKSIZE+i] = 0;
                        }
                }
        }
        return not0;
}

// grid2atm[atm_id,xyz,grid_id]
/* Grid coordinates relative to each atom: grid2atm[a][d][g] = coord[d][g] - R_a. */
static void _fill_grid2atm(double *grid2atm, double *coord, int bgrids, int ngrids,
                           int *atm, int natm, int *bas, int nbas, double *env)
{
        int atm_id, ig;
        double *r_atm;
        for (atm_id = 0; atm_id < natm; atm_id++) {
                r_atm = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                for (ig = 0; ig < bgrids; ig++) {
                        grid2atm[0*BLKSIZE+ig] = coord[0*ngrids+ig] - r_atm[0];
                        grid2atm[1*BLKSIZE+ig] = coord[1*ngrids+ig] - r_atm[1];
                        grid2atm[2*BLKSIZE+ig] = coord[2*ngrids+ig] - r_atm[2];
                }
                grid2atm += 3*BLKSIZE;
        }
}

/* Zero the first bgrids columns of `counts` rows, each row of stride odim. */
static void _dset0(double *out, int odim, int bgrids, int counts)
{
        int i, j;
        for (i = 0; i < counts; i++) {
                for (j = 0; j < bgrids; j++) {
                        out[i*odim+j] = 0;
                }
        }
}

/* Complex counterpart of _dset0. */
static void _zset0(double complex *out, int odim, int bgrids, int counts)
{
        int i, j;
        for (i = 0; i < counts; i++) {
                for (j = 0; j < bgrids; j++) {
                        out[i*odim+j] = 0;
                }
        }
}

/*
 * Evaluate one block of bgrids grid points for the shells in shls_slice and
 * store real spherical-harmonic AO values in ao (leading dimension ngrids).
 * feval computes Cartesian GTO values; fexp is the primitive screen/radial
 * routine (e.g. GTOprim_exp); l>1 results are converted cart->sph via
 * CINTc2s_ket_sph1.  Shells screened out by non0table/fexp are zero-filled.
 */
void GTOeval_sph_iter(void (*feval)(), int (*fexp)(), double fac,
                      int nao, int ngrids, int bgrids,
                      int param[], int *shls_slice, int *ao_loc, double *buf,
                      double *ao, double *coord, char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
        const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
        const int atmcount = atmend - atmstart;
        const size_t Ngrids = ngrids;
        int i, k, l, np, nc, atm_id, bas_id, deg, dcart, ao_id;
        double fac1;
        double *p_exp, *pcoeff, *pcoord, *pcart, *ri, *pao;
        /* carve three scratch regions out of buf */
        double *grid2atm = buf; // [atm_id,xyz,grid]
        double *eprim = grid2atm + atmcount*3*BLKSIZE;
        double *cart_gto = eprim + NPRIMAX*BLKSIZE*2;

        _fill_grid2atm(grid2atm, coord, bgrids, ngrids,
                       atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);

        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = l * 2 + 1;        /* spherical components per contraction */
                fac1 = fac * CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                if (non0table[bas_id] &&
                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) {
                        dcart = (l+1)*(l+2)/2;
                        ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                        if (l <= 1) { // s, p functions
                                /* sph == cart for l<=1: write directly into ao */
                                (*feval)(ao+ao_id*Ngrids, ri, eprim, pcoord,
                                         p_exp, pcoeff, env, l, np, nc,
                                         nao, ngrids, bgrids);
                        } else {
                                (*feval)(cart_gto, ri, eprim, pcoord,
                                         p_exp, pcoeff, env, l, np, nc,
                                         nc*dcart, bgrids, bgrids);
                                pcart = cart_gto;
                                for (i = 0; i < ncomp; i++) {
                                        pao = ao + (i*nao+ao_id)*Ngrids;
                                        for (k = 0; k < nc; k++) {
                                                CINTc2s_ket_sph1(pao, pcart,
                                                                 ngrids, bgrids, l);
                                                pao += deg * ngrids;
                                                pcart += dcart * bgrids;
                                        }
                                }
                        }
                } else {
                        for (i = 0; i < ncomp; i++) {
                                _dset0(ao+(i*nao+ao_id)*Ngrids, ngrids, bgrids, nc*deg);
                        }
                }
        }
}

/*
 * Same as GTOeval_sph_iter but the AO values stay in the Cartesian basis,
 * so feval writes straight into ao and no cart->sph transform is needed.
 */
void GTOeval_cart_iter(void (*feval)(), int (*fexp)(), double fac,
                       int nao, int ngrids, int bgrids,
                       int param[], int *shls_slice, int *ao_loc, double *buf,
                       double *ao, double *coord, char *non0table,
                       int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
        const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
        const int atmcount = atmend - atmstart;
        const size_t Ngrids = ngrids;
        int i, k, l, np, nc, atm_id, bas_id, deg, ao_id;
        double fac1;
        double *p_exp, *pcoeff, *pcoord, *pcart, *ri, *pao;
        double *grid2atm = buf; // [atm_id,xyz,grid]
        double *eprim = grid2atm + atmcount*3*BLKSIZE;

        _fill_grid2atm(grid2atm, coord, bgrids, ngrids,
                       atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);

        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = (l+1)*(l+2)/2;    /* Cartesian components per contraction */
                fac1 = fac * CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                if (non0table[bas_id] &&
                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) {
                        ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                        (*feval)(ao+ao_id*Ngrids, ri, eprim, pcoord,
                                 p_exp, pcoeff, env, l, np, nc,
                                 nao, ngrids, bgrids);
                } else {
                        for (i = 0; i < ncomp; i++) {
                                _dset0(ao+(i*nao+ao_id)*Ngrids, ngrids, bgrids, nc*deg);
                        }
                }
        }
}

/*
 * Spinor AO evaluation: Cartesian values from feval are combined into the
 * alpha (aoa) and beta (aob) spinor components by the caller-supplied c2s
 * transform.  ao holds both halves: aob starts ncomp*nao*ngrids after aoa.
 */
void GTOeval_spinor_iter(void (*feval)(), int (*fexp)(), void (*c2s)(), double fac,
                         int nao, int ngrids, int bgrids,
                         int param[], int *shls_slice, int *ao_loc, double *buf,
                         double complex *ao, double *coord, char *non0table,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ncomp_e1 = param[POS_E1];
        const int ncomp = param[TENSOR];
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int atmstart = bas[sh0*BAS_SLOTS+ATOM_OF];
        const int atmend = bas[(sh1-1)*BAS_SLOTS+ATOM_OF]+1;
        const int atmcount = atmend - atmstart;
        const size_t Ngrids = ngrids;
        int i, k, l, np, nc, atm_id, bas_id, deg, kappa, dcart, ao_id;
        size_t off;
        double fac1;
        double *p_exp, *pcoeff, *pcoord, *pcart, *ri;
        double complex *aoa = ao;
        double complex *aob = ao + ncomp*nao*ngrids;
        double *grid2atm = buf; // [atm_id,xyz,grid]
        double *eprim = grid2atm + atmcount*3*BLKSIZE;
        double *cart_gto = eprim + NPRIMAX*BLKSIZE*2;

        _fill_grid2atm(grid2atm, coord, bgrids, ngrids,
                       atm+atmstart*ATM_SLOTS, atmcount, bas, nbas, env);

        for (bas_id = sh0; bas_id < sh1; bas_id++) {
                np = bas[bas_id*BAS_SLOTS+NPRIM_OF];
                nc = bas[bas_id*BAS_SLOTS+NCTR_OF ];
                l  = bas[bas_id*BAS_SLOTS+ANG_OF  ];
                deg = CINTlen_spinor(bas_id, bas);
                fac1 = fac * CINTcommon_fac_sp(l);
                p_exp = env + bas[bas_id*BAS_SLOTS+PTR_EXP];
                pcoeff = env + bas[bas_id*BAS_SLOTS+PTR_COEFF];
                atm_id = bas[bas_id*BAS_SLOTS+ATOM_OF];
                pcoord = grid2atm + (atm_id - atmstart) * 3*BLKSIZE;
                ao_id = ao_loc[bas_id] - ao_loc[sh0];
                if (non0table[bas_id] &&
                    (*fexp)(eprim, pcoord, p_exp, pcoeff, l, np, nc, bgrids, fac1)) {
                        kappa = bas[bas_id*BAS_SLOTS+KAPPA_OF];
                        dcart = (l+1)*(l+2)/2;
                        ri = env + atm[PTR_COORD+atm_id*ATM_SLOTS];
                        (*feval)(cart_gto, ri, eprim, pcoord,
                                 p_exp, pcoeff, env, l, np, nc,
                                 nc*dcart, bgrids, bgrids);
                        for (i = 0; i < ncomp; i++) {
                                pcart = cart_gto + i * nc*dcart*bgrids*ncomp_e1;
                                off = (i*nao+ao_id)*Ngrids;
                                (*c2s)(aoa+off, aob+off, pcart,
                                       ngrids, bgrids, nc, kappa, l);
                        }
                } else {
                        for (i = 0; i < ncomp; i++) {
                                off = (i*nao+ao_id)*Ngrids;
                                _zset0(aoa+off, ngrids, bgrids, nc*deg);
                                _zset0(aob+off, ngrids, bgrids, nc*deg);
                        }
                }
        }
}

/*
 * Partition [sh0, sh1) into runs of consecutive shells on the same atom.
 * shloc[0..nshblk] receives the run boundaries (shloc[nshblk] == sh1);
 * returns the number of runs.  shloc must have room for sh1-sh0+1 entries.
 */
int GTOshloc_by_atom(int *shloc, int *shls_slice, int *ao_loc, int *atm, int *bas)
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        int ish, nshblk, lastatm;
        shloc[0] = sh0;
        nshblk = 1;
        lastatm = bas[BAS_SLOTS*sh0+ATOM_OF];
        for (ish = sh0; ish < sh1; ish++) {
                if (lastatm != bas[BAS_SLOTS*ish+ATOM_OF]) {
                        lastatm = bas[BAS_SLOTS*ish+ATOM_OF];
                        shloc[nshblk] = ish;
                        nshblk++;
                }
        }
        shloc[nshblk] = sh1;
        return nshblk;
}

/*
 * non0table[ngrids/blksize,natm] is the T/F table for ao values to
 * screen the ao evaluation for each shell
 */
/*
 * Parallel driver: splits the work into (shell-run, grid-block) pairs and
 * dispatches each pair to fiter (GTOeval_sph_iter or GTOeval_cart_iter).
 * Each thread owns a private malloc'd scratch buffer, freed at the end.
 */
void GTOeval_loop(void (*fiter)(), void (*feval)(), int (*fexp)(), double fac,
                  int ngrids, int param[], int *shls_slice, int *ao_loc,
                  double *ao, double *coord, char *non0table,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;

#pragma omp parallel default(none) \
        shared(fiter, feval, fexp, fac, param, ao_loc, shls_slice, ngrids, \
               ao, coord, non0table, atm, natm, bas, nbas, env, shloc)
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff;
        int ncart = NCTR_CART * param[TENSOR] * param[POS_E1];
        /* per-thread scratch: grid2atm + eprim + cartesian GTO buffer */
        double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart));
#pragma omp for schedule(static)
        for (k = 0; k < nblk*nshblk; k++) {
                iloc = k / nblk;                /* which shell run */
                ish = shloc[iloc];
                aoff = ao_loc[ish] - ao_loc[sh0];
                ib = k - iloc * nblk;           /* which grid block */
                ip = ib * BLKSIZE;
                (*fiter)(feval, fexp, fac, nao, ngrids,
                         MIN(ngrids-ip, BLKSIZE),
                         param, shloc+iloc, ao_loc, buf,
                         ao+aoff*Ngrids+ip, coord+ip, non0table+ib*nbas,
                         atm, natm, bas, nbas, env);
        }
        free(buf);
}
}

/* Entry point for real spherical-harmonic AOs. */
void GTOeval_sph_drv(void (*feval)(), int (*fexp)(), double fac, int ngrids,
                     int param[], int *shls_slice, int *ao_loc,
                     double *ao, double *coord, char *non0table,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        GTOeval_loop(GTOeval_sph_iter, feval, fexp, fac, ngrids,
                     param, shls_slice, ao_loc, ao, coord, non0table,
                     atm, natm, bas, nbas, env);
}

/* Entry point for Cartesian AOs. */
void GTOeval_cart_drv(void (*feval)(), int (*fexp)(), double fac, int ngrids,
                      int param[], int *shls_slice, int *ao_loc,
                      double *ao, double *coord, char *non0table,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        GTOeval_loop(GTOeval_cart_iter, feval, fexp, fac, ngrids,
                     param, shls_slice, ao_loc, ao, coord, non0table,
                     atm, natm, bas, nbas, env);
}

/*
 * Entry point for spinor AOs.  Cannot reuse GTOeval_loop because the iter
 * function takes the extra c2s transform and a complex ao array, so the
 * parallel region is duplicated here with the same blocking scheme.
 */
void GTOeval_spinor_drv(void (*feval)(), int (*fexp)(), void (*c2s)(),
                        double fac, int ngrids,
                        int param[], int *shls_slice, int *ao_loc,
                        double complex *ao, double *coord, char *non0table,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
        int shloc[shls_slice[1]-shls_slice[0]+1];
        const int nshblk = GTOshloc_by_atom(shloc, shls_slice, ao_loc, atm, bas);
        const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
        const size_t Ngrids = ngrids;

#pragma omp parallel default(none) \
        shared(feval, fexp, c2s, fac, ngrids, param, ao_loc, shls_slice, \
               ao, coord, non0table, atm, natm, bas, nbas, env, shloc)
{
        const int sh0 = shls_slice[0];
        const int sh1 = shls_slice[1];
        const int nao = ao_loc[sh1] - ao_loc[sh0];
        int ip, ib, k, iloc, ish;
        size_t aoff;
        int ncart = NCTR_CART * param[TENSOR] * param[POS_E1];
        double *buf = malloc(sizeof(double) * BLKSIZE*(NPRIMAX*2+ncart));
#pragma omp for schedule(static)
        for (k = 0; k < nblk*nshblk; k++) {
                iloc = k / nblk;
                ish = shloc[iloc];
                aoff = ao_loc[ish] - ao_loc[sh0];
                ib = k - iloc * nblk;
                ip = ib * BLKSIZE;
                GTOeval_spinor_iter(feval, fexp, c2s, fac, nao, ngrids,
                                    MIN(ngrids-ip, BLKSIZE),
                                    param, shloc+iloc, ao_loc, buf,
                                    ao+aoff*Ngrids+ip, coord+ip,
                                    non0table+ib*nbas,
                                    atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/

#include "magick/studio.h"
#include "magick/property.h"
#include "magick/blob.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/configure.h"
#include "magick/constitute.h"
#include "magick/decorate.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/effect.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/montage.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/shear.h"
#include "magick/signature-private.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/xml-tree.h"

/*
  Define declarations.
*/
#define ThresholdsFilename  "thresholds.xml"

/*
  Typedef declarations.

  A parsed threshold (dither) map from thresholds.xml: the map/alias name,
  a human-readable description, and a width-by-height matrix of levels with
  its divisor (field meanings mirror the XML attributes in
  MinimalThresholdMap below).
*/
struct _ThresholdMap
{
  char
    *map_id,
    *description;

  size_t
    width,
    height;

  ssize_t
    divisor,
    *levels;
};

/*
  Static declarations.

  Built-in fallback maps used when thresholds.xml cannot be loaded.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    " <threshold map=\"threshold\" alias=\"1x1\">"
    " <description>Threshold 1x1 (non-dither)</description>"
    " <levels width=\"1\" height=\"1\" divisor=\"2\">"
    " 1"
    " </levels>"
    " </threshold>"
    " <threshold map=\"checks\" alias=\"2x1\">"
    " <description>Checkerboard 2x1 (dither)</description>"
    " <levels width=\"2\" height=\"2\" divisor=\"3\">"
    " 1 2"
    " 2 1"
    " </levels>"
    " </threshold>"
    "</thresholds>";

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e T h r e s h o l d I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveThresholdImage() selects an individual threshold for each pixel
%  based on the range of intensity values in its local neighborhood.  This
%  allows for thresholding of an image whose global intensity histogram
%  doesn't contain distinctive peaks.
%
%  The format of the AdaptiveThresholdImage method is:
%
%      Image *AdaptiveThresholdImage(const Image *image,
%        const size_t width,const size_t height,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the local neighborhood.
%
%    o height: the height of the local neighborhood.
%
%    o offset: the mean offset.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const ssize_t offset, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; MagickRealType number_pixels; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(threshold_image,DirectClass) == MagickFalse) { InheritException(exception,&threshold_image->exception); threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Local adaptive threshold. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&zero); number_pixels=(MagickRealType) (width*height); image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; MagickPixelPacket channel_bias, channel_sum; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict threshold_indexes; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) height/2L,image->columns+width,height,exception); q=GetCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); threshold_indexes=GetCacheViewAuthenticIndexQueue(threshold_view); channel_bias=zero; channel_sum=zero; r=p; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) { channel_bias.red+=r[u].red; channel_bias.green+=r[u].green; channel_bias.blue+=r[u].blue; channel_bias.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } channel_sum.red+=r[u].red; channel_sum.green+=r[u].green; channel_sum.blue+=r[u].blue; channel_sum.opacity+=r[u].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+(r-p)+u); } r+=image->columns+width; } for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket mean; mean=zero; r=p; 
channel_sum.red-=channel_bias.red; channel_sum.green-=channel_bias.green; channel_sum.blue-=channel_bias.blue; channel_sum.opacity-=channel_bias.opacity; channel_sum.index-=channel_bias.index; channel_bias=zero; for (v=0; v < (ssize_t) height; v++) { channel_bias.red+=r[0].red; channel_bias.green+=r[0].green; channel_bias.blue+=r[0].blue; channel_bias.opacity+=r[0].opacity; if (image->colorspace == CMYKColorspace) channel_bias.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+0); channel_sum.red+=r[width-1].red; channel_sum.green+=r[width-1].green; channel_sum.blue+=r[width-1].blue; channel_sum.opacity+=r[width-1].opacity; if (image->colorspace == CMYKColorspace) channel_sum.index=(MagickRealType) GetPixelIndex(indexes+x+(r-p)+ width-1); r+=image->columns+width; } mean.red=(MagickRealType) (channel_sum.red/number_pixels+offset); mean.green=(MagickRealType) (channel_sum.green/number_pixels+offset); mean.blue=(MagickRealType) (channel_sum.blue/number_pixels+offset); mean.opacity=(MagickRealType) (channel_sum.opacity/number_pixels+offset); if (image->colorspace == CMYKColorspace) mean.index=(MagickRealType) (channel_sum.index/number_pixels+offset); SetPixelRed(q,((MagickRealType) GetPixelRed(q) <= mean.red) ? 0 : QuantumRange); SetPixelGreen(q,((MagickRealType) GetPixelGreen(q) <= mean.green) ? 0 : QuantumRange); SetPixelBlue(q,((MagickRealType) GetPixelBlue(q) <= mean.blue) ? 0 : QuantumRange); SetPixelOpacity(q,((MagickRealType) GetPixelOpacity(q) <= mean.opacity) ? 0 : QuantumRange); if (image->colorspace == CMYKColorspace) SetPixelIndex(threshold_indexes+x,(((MagickRealType) GetPixelIndex( threshold_indexes+x) <= mean.index) ? 
0 : QuantumRange)); p++; q++; } sync=SyncCacheViewAuthenticPixels(threshold_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically selects a threshold and replaces each % pixel in the image with a black pixel if the image intentsity is less than % the selected threshold otherwise white. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; register ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. 
*/ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. 
*/ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ (void) exception; start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
*/
  /*
    Line endpoints: (x1,y1) is the histogram peak; (x2,y2) is the far
    non-empty end (whichever side of the peak spans more bins).
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  /*
    Line through (x1,y1)-(x2,y2) in implicit form a*x+b*y+c=0.
  */
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  /*
    Scan the side between the line's far end and the peak, keeping the bin
    with the largest perpendicular distance on the correct side of the line.
  */
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

/*
  AutoThresholdImage() builds a normalized intensity histogram of the image,
  selects a threshold with the requested method (Kapur, Otsu, or triangle),
  records it in the "auto-threshold:threshold" image property, and bilevels
  the image at that threshold.  Returns MagickFalse on failure.
*/
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* bin each pixel by its 8-bit intensity */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(image,histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* the threshold methods signal failure with a negative return */
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property);
  return(BilevelImage(image,QuantumRange*threshold/100.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that give is set to it maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImageChannel method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold)
%      MagickBooleanType BilevelImageChannel(Image *image,
%        const ChannelType channel,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o threshold: define the threshold values.
%
%  Aside: You can get the same results as operator using LevelImageChannels()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: threshold the default channels */
  status=BilevelImageChannel(image,DefaultChannels,threshold);
  return(status);
}

MagickExport MagickBooleanType BilevelImageChannel(Image *image,
  const ChannelType channel,const double threshold)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if ((channel & SyncChannels) != 0)
      {
        /*
          All channels synced: threshold on overall pixel intensity and
          write the same bilevel value into red, green, and blue.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,GetPixelIntensity(image,q) <= threshold ? 0 :
            QuantumRange);
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
          q++;
        }
      }
    else
      /* threshold each requested channel independently */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        if ((channel & RedChannel) != 0)
          SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & GreenChannel) != 0)
          SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & BlueChannel) != 0)
          SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold ? 0 :
            QuantumRange);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
                threshold ? 0 : QuantumRange);
            else
              /* matte images: opacity is inverted (alpha) */
              SetPixelAlpha(q,(MagickRealType) GetPixelAlpha(q) <= threshold ?
                OpaqueOpacity : TransparentOpacity);
          }
        if (((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
            threshold ? 0 : QuantumRange);
        q++;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress++ is not atomic under OpenMP, so the value
          passed to SetImageProgress may be approximate — confirm whether an
          `#pragma omp atomic` guard is wanted here.
        */
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType BlackThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  /* convenience wrapper: default channels, image's own exception */
  status=BlackThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

MagickExport MagickBooleanType BlackThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a NULL threshold string is a no-op, not an error */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Parse the threshold geometry: rho/sigma/xi/psi/chi map to
    red/green/blue/opacity/index; omitted values default to red.
  */
  GetMagickPixelPacket(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* percent thresholds scale to the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* force below-threshold channel values to zero; others unchanged */
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) < threshold.red))
        SetPixelRed(q,0);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) < threshold.green))
        SetPixelGreen(q,0);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) < threshold.blue))
        SetPixelBlue(q,0);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) < threshold.opacity))
        SetPixelOpacity(q,0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) < threshold.index))
        SetPixelIndex(indexes+x,0);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any
%  pixel whose value is above the
quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImageChannel method is:
%
%      MagickBooleanType ClampImage(Image *image)
%      MagickBooleanType ClampImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image)
{
  MagickBooleanType
    status;

  /* convenience wrapper: clamp the default channels */
  status=ClampImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType ClampImageChannel(Image *image,
  const ChannelType channel)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        Palette image: clamp the colormap entries instead of every pixel,
        then resynchronize the pixel cache with the colormap.
      */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixel((MagickRealType) GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixel((MagickRealType) GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixel((MagickRealType) GetPixelBlue(q)));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampPixel((MagickRealType) GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampPixel((MagickRealType) GetPixelIndex(
          indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap
*DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport ThresholdMap *GetThresholdMapFile(const char *xml,
  const char *filename,const char *map_id,ExceptionInfo *exception)
{
  const char
    *attribute,
    *content;

  double
    value;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  map = (ThresholdMap *) NULL;
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(map);
  /*
    Locate the <threshold> element whose "map" or "alias" attribute matches
    the requested map_id.
  */
  for (threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    attribute=GetXMLTreeAttribute(threshold, "map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold, "alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    The map has been found -- allocate a Threshold Map to return.
  */
  map=(ThresholdMap *) AcquireMagickMemory(sizeof(ThresholdMap));
  if (map == (ThresholdMap *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  /*
    Assign basic attributes.
  */
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Validate required <levels> attributes: width, height, and divisor.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels, "divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Allocate threshold levels array.
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  {
    char
      *p;

    register ssize_t
      i;

    /*
      Parse levels into integer array.
    */
    for (i=0; i< (ssize_t) (map->width*map->height); i++)
    {
      map->levels[i]=(ssize_t) strtol(content,&p,10);
      if (p == content)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> too few values, map \"%s\"", map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
            (double) map->levels[i],map_id);
          thresholds=DestroyXMLTree(thresholds);
          map=DestroyThresholdMap(map);
          return(map);
        }
      content=p;
    }
    /* a trailing parseable value means the list was too long */
    value=(double) strtol(content,&p,10);
    (void) value;
    if (p != content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too many values, map \"%s\"", map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
  }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() load and search one or more threshold map files for
%  a map matching the given name or alias.
% % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; ThresholdMap *map; map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,const char*xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  XMLTreeInfo
    *thresholds,*threshold,*description;

  const char
    *map,*alias,*content;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  /* table header */
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  /* one output row per <threshold> element */
  for( threshold = GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold = GetNextXMLTreeTag(threshold) )
  {
    map = GetXMLTreeAttribute(threshold, "map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias = GetXMLTreeAttribute(threshold, "alias");
    /* alias is optional, no if test needed */
    description=GetXMLTreeChild(threshold,"description");
    if ( description == (XMLTreeInfo *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if ( content == (char *) NULL )
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: An pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  /* list the maps found in every configured thresholds file */
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() uses the ordered dithering technique of reducing color
%  images to monochrome using positional information to retain as much
%  information as possible.
% % WARNING: This function is deprecated, and is now just a call to % the more more powerful OrderedPosterizeImage(); function. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image) % MagickBooleanType OrderedDitherImageChannel(Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image) { MagickBooleanType status; status=OrderedDitherImageChannel(image,DefaultChannels,&image->exception); return(status); } MagickExport MagickBooleanType OrderedDitherImageChannel(Image *image, const ChannelType channel,ExceptionInfo *exception) { MagickBooleanType status; /* Call the augumented function OrderedPosterizeImage() */ status=OrderedPosterizeImageChannel(image,channel,"o8x8",exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedPosterizeImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedPosterizeImage method is: % % MagickBooleanType OrderedPosterizeImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % MagickBooleanType OrderedPosterizeImageChannel(Image *image, % const ChannelType channel,const char *threshold_map, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel or channels to be thresholded. 
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedPosterizeImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { MagickBooleanType status; status=OrderedPosterizeImageChannel(image,DefaultChannels,threshold_map, exception); return(status); } MagickExport MagickBooleanType OrderedPosterizeImageChannel(Image *image, const ChannelType channel,const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; LongPixelPacket levels; MagickBooleanType status; MagickOffsetType progress; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); { char token[MaxTextExtent]; register const char *p; p=(char *)threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MaxTextExtent-1)) break; token[p-threshold_map] = *p; p++; } token[p-threshold_map] = '\0'; map = GetThresholdMap(token, exception); if ( map == (ThresholdMap *) NULL ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } } /* Set channel levels from extra comma separated arguments Default to 2, the single value given, or individual channel values */ #if 1 { /* parse directly as a comma separated list of integers */ char *p; p = strchr((char *) threshold_map,','); if ( p != (char *) NULL && isdigit((int) ((unsigned char) *(++p))) ) levels.index = (unsigned int) strtoul(p, &p, 10); else levels.index = 2; levels.red = ((channel & RedChannel ) != 0) ? 
levels.index : 0; levels.green = ((channel & GreenChannel) != 0) ? levels.index : 0; levels.blue = ((channel & BlueChannel) != 0) ? levels.index : 0; levels.opacity = ((channel & OpacityChannel) != 0) ? levels.index : 0; levels.index = ((channel & IndexChannel) != 0 && (image->colorspace == CMYKColorspace)) ? levels.index : 0; /* if more than a single number, each channel has a separate value */ if ( p != (char *) NULL && *p == ',' ) { p=strchr((char *) threshold_map,','); p++; if ((channel & RedChannel) != 0) levels.red = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & GreenChannel) != 0) levels.green = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & BlueChannel) != 0) levels.blue = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & IndexChannel) != 0 && image->colorspace == CMYKColorspace) levels.index=(unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); if ((channel & OpacityChannel) != 0) levels.opacity = (unsigned int) strtoul(p, &p, 10), (void)(*p == ',' && p++); } } #else /* Parse level values as a geometry */ /* This difficult! * How to map GeometryInfo structure elements into * LongPixelPacket structure elements, but according to channel? * Note the channels list may skip elements!!!! * EG -channel BA -ordered-dither map,2,3 * will need to map g.rho -> l.blue, and g.sigma -> l.opacity * A simpler way is needed, probably converting geometry to a temporary * array, then using channel to advance the index into ssize_t pixel packet. */ #endif #if 0 printf("DEBUG levels r=%u g=%u b=%u a=%u i=%u\n", levels.red, levels.green, levels.blue, levels.opacity, levels.index); #endif { /* Do the posterized ordered dithering of the image */ ssize_t d; /* d = number of psuedo-level divisions added between color levels */ d = map->divisor-1; /* reduce levels to levels - 1 */ levels.red = levels.red ? levels.red-1 : 0; levels.green = levels.green ? 
levels.green-1 : 0; levels.blue = levels.blue ? levels.blue-1 : 0; levels.opacity = levels.opacity ? levels.opacity-1 : 0; levels.index = levels.index ? levels.index-1 : 0; if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t threshold, t, l; /* Figure out the dither threshold for this pixel This must be a integer from 1 to map->divisor-1 */ threshold = map->levels[(x%map->width) +map->width*(y%map->height)]; /* Dither each channel in the image as appropriate Notes on the integer Math... total number of divisions = (levels-1)*(divisor-1)+1) t1 = this colors psuedo_level = q->red * total_divisions / (QuantumRange+1) l = posterization level 0..levels t = dither threshold level 0..divisor-1 NB: 0 only on last Each color_level is of size QuantumRange / (levels-1) NB: All input levels and divisor are already had 1 subtracted Opacity is inverted so 'off' represents transparent. 
*/ if (levels.red) { t = (ssize_t) (QuantumScale*GetPixelRed(q)*(levels.red*d+1)); l = t/d; t = t-l*d; SetPixelRed(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.red))); } if (levels.green) { t = (ssize_t) (QuantumScale*GetPixelGreen(q)* (levels.green*d+1)); l = t/d; t = t-l*d; SetPixelGreen(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.green))); } if (levels.blue) { t = (ssize_t) (QuantumScale*GetPixelBlue(q)* (levels.blue*d+1)); l = t/d; t = t-l*d; SetPixelBlue(q,ClampToQuantum((MagickRealType) ((l+(t >= threshold))*(MagickRealType) QuantumRange/levels.blue))); } if (levels.opacity) { t = (ssize_t) ((1.0-QuantumScale*GetPixelOpacity(q))* (levels.opacity*d+1)); l = t/d; t = t-l*d; SetPixelOpacity(q,ClampToQuantum((MagickRealType) ((1.0-l-(t >= threshold))*(MagickRealType) QuantumRange/ levels.opacity))); } if (levels.index) { t = (ssize_t) (QuantumScale*GetPixelIndex(indexes+x)* (levels.index*d+1)); l = t/d; t = t-l*d; SetPixelIndex(indexes+x,ClampToQuantum((MagickRealType) ((l+ (t>=threshold))*(MagickRealType) QuantumRange/levels.index))); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. 
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon)
%      MagickBooleanType PerceptibleImageChannel(Image *image,
%        const ChannelType channel,const double epsilon)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel type.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
*/

/*
  Clamp a quantum away from zero: values whose magnitude is below epsilon
  are replaced with +/-epsilon (matching the sign); others pass through.
  NOTE(review): with an unsigned Quantum the (double) cast can never be
  negative, so sign is then always 1.0 — the negative branch only matters
  for signed/HDRI quantum types; confirm against the build configuration.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

/* Convenience wrapper: apply the perceptible threshold to the default
   channels. */
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon)
{
  MagickBooleanType
    status;

  status=PerceptibleImageChannel(image,DefaultChannels,epsilon);
  return(status);
}

/*
  Apply PerceptibleThreshold() to every pixel of the selected channels.
  PseudoClass images are handled by rewriting the colormap only; otherwise
  the authentic pixels are updated row by row (OpenMP parallel when
  available).  Errors are recorded in image->exception.
*/
MagickExport MagickBooleanType PerceptibleImageChannel(Image *image,
  const ChannelType channel,const double epsilon)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* Colormapped image: thresholding the colormap entries is enough. */
      register ssize_t
        i;

      register PixelPacket
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
        q++;
      }
      return(SyncImage(image));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PerceptibleThreshold(GetPixelRed(q),epsilon));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PerceptibleThreshold(GetPixelGreen(q),epsilon));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PerceptibleThreshold(GetPixelBlue(q),epsilon));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,PerceptibleThreshold(GetPixelOpacity(q),epsilon));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PerceptibleThreshold(GetPixelIndex(indexes+x),
          epsilon));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a
random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%      MagickBooleanType RandomThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *thresholds,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o thresholds: a geometry string containing low,high thresholds.  If the
%      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or 4
%      is performed instead.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: random-threshold the default channels. */
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=RandomThresholdImageChannel(image,DefaultChannels,thresholds,
    exception);
  return(status);
}

/*
  Binarize the selected channels against a per-pixel random threshold drawn
  between the low/high bounds parsed from the thresholds geometry.  Small
  integer thresholds are redirected to the ordered-dither path for backward
  compatibility.  CompositeChannels requests a 2-color colormapped result
  based on pixel intensity; otherwise each channel is thresholded
  independently.
*/
MagickExport MagickBooleanType RandomThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickRealType
    min_threshold,
    max_threshold;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  GetMagickPixelPacket(image,&threshold);
  min_threshold=0.0;
  max_threshold=(MagickRealType) QuantumRange;
  flags=ParseGeometry(thresholds,&geometry_info);
  min_threshold=geometry_info.rho;
  max_threshold=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    max_threshold=min_threshold;
  if (strchr(thresholds,'%') != (char *) NULL)
    {
      /* Percent thresholds: scale into the quantum range. */
      max_threshold*=(MagickRealType) (0.01*QuantumRange);
      min_threshold*=(MagickRealType) (0.01*QuantumRange);
    }
  else
    if (((max_threshold == min_threshold) || (max_threshold == 1)) &&
        (min_threshold <= 8))
      {
        /*
          Backward Compatibility -- ordered-dither -- IM v 6.2.9-6.
        */
        status=OrderedPosterizeImageChannel(image,channel,thresholds,exception);
        return(status);
      }
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  if (channel == CompositeChannels)
    {
      /* Intensity path: produce a 2-color colormapped image. */
      if (AcquireImageColormap(image,2) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      random_info=AcquireRandomInfoThreadSet();
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickBooleanType
          sync;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          IndexPacket
            index;

          MagickRealType
            intensity;

          /* Threshold is random only when intensity lies inside [min,max]. */
          intensity=GetPixelIntensity(image,q);
          if (intensity < min_threshold)
            threshold.index=min_threshold;
          else
            if (intensity > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType)(QuantumRange*
                GetPseudoRandomValue(random_info[id]));
          index=(IndexPacket) (intensity <= threshold.index ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,ThresholdImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      random_info=DestroyRandomInfoThreadSet(random_info);
      return(status);
    }
  /* Per-channel path: threshold each selected channel independently. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pick a threshold per channel (clamped outside [min,max]). */
      if ((channel & RedChannel) != 0)
        {
          if ((MagickRealType) GetPixelRed(q) < min_threshold)
            threshold.red=min_threshold;
          else
            if ((MagickRealType) GetPixelRed(q) > max_threshold)
              threshold.red=max_threshold;
            else
              threshold.red=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & GreenChannel) != 0)
        {
          if ((MagickRealType) GetPixelGreen(q) < min_threshold)
            threshold.green=min_threshold;
          else
            if ((MagickRealType) GetPixelGreen(q) > max_threshold)
              threshold.green=max_threshold;
            else
              threshold.green=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & BlueChannel) != 0)
        {
          if ((MagickRealType) GetPixelBlue(q) < min_threshold)
            threshold.blue=min_threshold;
          else
            if ((MagickRealType) GetPixelBlue(q) > max_threshold)
              threshold.blue=max_threshold;
            else
              threshold.blue=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if ((channel & OpacityChannel) != 0)
        {
          if ((MagickRealType) GetPixelOpacity(q) < min_threshold)
            threshold.opacity=min_threshold;
          else
            if ((MagickRealType) GetPixelOpacity(q) > max_threshold)
              threshold.opacity=max_threshold;
            else
              threshold.opacity=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((MagickRealType) GetPixelIndex(indexes+x) < min_threshold)
            threshold.index=min_threshold;
          else
            if ((MagickRealType) GetPixelIndex(indexes+x) > max_threshold)
              threshold.index=max_threshold;
            else
              threshold.index=(MagickRealType) (QuantumRange*
                GetPseudoRandomValue(random_info[id]));
        }
      /* Binarize: at or below the threshold -> 0, above -> QuantumRange. */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,(MagickRealType) GetPixelRed(q) <= threshold.red ? 0 :
          QuantumRange);
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,(MagickRealType) GetPixelGreen(q) <= threshold.green ?
          0 : QuantumRange);
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,(MagickRealType) GetPixelBlue(q) <= threshold.blue ? 0 :
          QuantumRange);
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,(MagickRealType) GetPixelOpacity(q) <=
          threshold.opacity ? 0 : QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,(MagickRealType) GetPixelIndex(indexes+x) <=
          threshold.index ? 0 : QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W h i t e T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,const char *threshold)
%      MagickBooleanType WhiteThresholdImageChannel(Image *image,
%        const ChannelType channel,const char *threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel or channels to be thresholded.
%
%    o threshold: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convenience wrapper: white-threshold the default channels, reporting
   errors through the image's own exception. */
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *threshold)
{
  MagickBooleanType
    status;

  status=WhiteThresholdImageChannel(image,DefaultChannels,threshold,
    &image->exception);
  return(status);
}

/*
  Force every selected-channel value strictly above the parsed threshold to
  QuantumRange (white); values at or below the threshold are unchanged.
  The thresholds geometry supplies per-channel values (rho/sigma/xi/psi/chi),
  each defaulting to the red threshold, optionally scaled as a percentage.
*/
MagickExport MagickBooleanType WhiteThresholdImageChannel(Image *image,
  const ChannelType channel,const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      /* FIX: propagate the failure reason to the caller's exception, as the
         sibling threshold functions in this file do. */
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  flags=ParseGeometry(thresholds,&geometry_info);
  GetMagickPixelPacket(image,&threshold);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    threshold.green=threshold.red;
  threshold.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    threshold.blue=threshold.red;
  threshold.opacity=geometry_info.psi;
  if ((flags & PsiValue) == 0)
    threshold.opacity=threshold.red;
  threshold.index=geometry_info.chi;
  if ((flags & ChiValue) == 0)
    threshold.index=threshold.red;
  if ((flags & PercentValue) != 0)
    {
      /* Percent thresholds: scale into the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.opacity*=(MagickRealType) (QuantumRange/100.0);
      threshold.index*=(MagickRealType) (QuantumRange/100.0);
    }
  if ((IsMagickGray(&threshold) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (((channel & RedChannel) != 0) &&
          ((MagickRealType) GetPixelRed(q) > threshold.red))
        SetPixelRed(q,QuantumRange);
      if (((channel & GreenChannel) != 0) &&
          ((MagickRealType) GetPixelGreen(q) > threshold.green))
        SetPixelGreen(q,QuantumRange);
      if (((channel & BlueChannel) != 0) &&
          ((MagickRealType) GetPixelBlue(q) > threshold.blue))
        SetPixelBlue(q,QuantumRange);
      if (((channel & OpacityChannel) != 0) &&
          ((MagickRealType) GetPixelOpacity(q) > threshold.opacity))
        SetPixelOpacity(q,QuantumRange);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          ((MagickRealType) GetPixelIndex(indexes+x) > threshold.index))
        SetPixelIndex(indexes+x,QuantumRange);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
omp_dgemm_batch.c
/** * @file omp_dgemm_batch.c * * @brief BBLAS gemm_batch double routine. * * BBLAS is a software package provided by Univ. of Manchester, * Univ. of Tennessee. * * @version 1.0.0 * @author Samuel D. Relton * @author Pedro V. Lara * @author Mawussi Zounon * @date 2016-02-20 * **/ #ifndef DOXYGEN_SHOULD_SKIP_THIS /** * Code generation * @generated from ./bblas_omp/omp_zgemm_batch.c normal z -> d, Mon Jun 6 09:44:14 2016 **/ #endif #include<cblas.h> #include "bblas_domp.h" #include "bblas.h" #include <omp.h> #define REAL /** Purpose ------- <b>omp_dgemm_batch</b> is an OpenMP version of dgemm_batch. It performs the matrix-matrix operations arrayC[i] = alpha[i]*op( arrayA[i] )*op( arrayB[i] ) + beta[i]*arrayC[i], where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha[i] and beta[i] are scalars, and arrayA[i], arrayB[i] and C are matrices, with op( arrayA[i] ) an m by k matrix, op( arrayB[i] ) a k by n matrix and arrayC[i] an m by n matrix. Fixed and Variable Batch Operations ----------------------------------- Two types of batch operation are supported depending upon the value of batch_opts. When <tt>batch_opts = BBLAS_VARIABLE</tt> - all parameters that are arrays must have length at least batch_count. - all parameters that are arrays must have all values set. When <tt>batch_opts = BBLAS_FIXED</tt> - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) must have length at least one. - all parameters that are arrays (except for arrayA, arrayB, arrayC, and info) need only to have their first value set. This means that for a <tt>BBLAS_FIXED</tt> batch, the values of transA[0], transB[0], M[0], N[0], K[0], alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations. Parameters ---------- @param[in] transA Array of <tt>enum BBLAS_TRANS</tt>. On entry, transA[i] specifies the form of op( arrayA[i] ) to be used in the matrix multiplication as follows: - = BblasNoTrans: op( arrayA[i] ) = arrayA[i]. 
- = BblasTrans: op( arrayA[i] ) = arrayA[i]**T. - = BblasConjTrans: op( arrayA[i] ) = arrayA[i]**H. @param[in] transB Array of <tt>enum BBLAS_TRANS</tt>. On entry, transB[i] specifies the form of op( arrayB[i] ) to be used in the matrix multiplication as follows: - = BblasNoTrans: op( arrayB[i] ) = arrayB[i]. - = BblasTrans: op( arrayB[i] ) = arrayB[i]**T. - = BblasConjTrans: op( arrayB[i] ) = arrayB[i]**H. @param[in] M Array of <tt>int</tt>. Each element M[i] specifies the number of rows of the matrix op( arrayA[i] ) and of the matrix arrayC[i]. M[i] must be greater than zero. @param[in] N Array of <tt>int</tt>. Each element N[i] specifies the number of columns of the matrix op( arrayB[i] ) and the number of columns of the matrix arrayC[i]. N[i] must be greater than zero. @param[in] K Array of <tt>int</tt>. Each element K[i] specifies the number of columns of the matrix op( arrayA[i] ) and the number of rows of the matrix op( arrayB[i] ). K[i] must be greater than zero. @param[in] alpha Array of <tt>real_16</tt>. @param[in] arrayA Array of pointers. Each element arrayA[i] is a pointer to a DOUBLE PRECISION matrix of dimension lda[i] by Ka[i], where Ka[i] is K[i] when transA[i] = BblasNoTrans, and is M[i] otherwise. When using transA[i] = BblasNoTrans the leading M[i] by K[i] part of arrayA[i] must contain the matrix elements, otherwise the leading K[i] by M[i] part of arrayA[i] must contain the matrix elements. @param[in] lda Array of <tt>int</tt>. Each element lda[i] specifies the first dimension of arrayA[i] as declared in the calling (sub) program. When transA[i] = BblasNoTrans then lda[i] must be at least max( 1, M[i] ), otherwise lda[i] must be at least max( 1, K[i] ). @param[in] arrayB Array of pointers. Each element arrayB[i] is a pointer to a DOUBLE PRECISION matrix of dimension ldb[i] by Kb[i], where Kb[i] is N[i] when transB[i] = BblasNoTrans, and is K[i] otherwise. 
When using transB[i] = BblasNoTrans the leading K[i] by N[i] part of arrayB[i] must contain the matrix elements, otherwise the leading N[i] by K[i] part of arrayB[i] must contain the matrix elements. @param[in] ldb Array of <tt>int</tt>. Each element ldb[i] specifies the first dimension of arrayB[i] as declared in the calling (sub) program. When transB[i] = BblasNoTrans then ldb[i] must be at least max( 1, K[i] ), otherwise ldb[i] must be at least max( 1, N[i] ). @param[in] beta Array of <tt>real_16</tt>. When beta[i] is set to zero arrayC[i] need not be set on input. @param[in,out] arrayC Array of pointers. Each element arrayC[i] is a pointer to a DOUBLE PRECISION matrix of dimension ldc[i] by N[i]. Before entry, the leading M[i] by N[i] part of the arrayC[i] must contain a matrix C, except when beta is zero, in which case C need not be set on entry. On exit, the matrix arrayC[i] is overwritten by the M[i] by N[i] matrix ( alpha[i]*op( arrayA[i] )*op( arrayB[i] ) + beta[i]*arrayC[i] ). @param[in] ldc Array of <tt>int</tt>. Each element ldc[i] specifies the first dimension of arrayC[i] as declared in the calling (sub) program. The value ldc[i] must be at least max( 1, M[i] ) @param[in] batch_count <tt>int</tt> The number of matrices to operate on. @param[in] batch_opts <tt>enum BBLAS_OPTS</tt> One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of batch operation required. @param[in,out] info Array of <tt>int</tt>. Each element info[i] is the error return code of the ith dgemm in the batch, these need not be set on entry. The error codes can be found in bblas_macros.h. 
**/
void omp_dgemm_batch(
    const enum BBLAS_TRANS *transA, const enum BBLAS_TRANS *transB,
    const int *M,  const int *N, const int *K,
    const double *alpha,
    const double **arrayA, const int *lda,
    const double **arrayB, const int *ldb,
    const double *beta,
    double **arrayC, const int *ldc,
    const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int LDA, LDB, batch_iter;
    char func_name[15] = "dgemm_batch";

    /* Check input arguments */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
        /* FIX: abort here.  Previously control fell through into the
           validation/compute paths despite an invalid batch_count. */
        return;
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* Fixed batch: validate once using the first element's parameters,
           then apply them to every problem in the batch. */
        if ((transA[first_index] != BblasNoTrans) &&
            (transA[first_index] != BblasTrans) &&
            (transA[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANSA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANSA;
            }
            return;
        }
        if ((transB[first_index] != BblasNoTrans) &&
            (transB[first_index] != BblasTrans) &&
            (transB[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANSB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANSB;
            }
            return;
        }
        /* Minimum leading dimensions implied by the transpose options. */
        if (transA[first_index] == BblasNoTrans)
        {
            LDA = M[first_index];
        } else
        {
            LDA = K[first_index];
        }
        if (transB[first_index] == BblasNoTrans)
        {
            LDB = K[first_index];
        } else
        {
            LDB = N[first_index];
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        if (K[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_K, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_K;
            }
            return;
        }
        if (lda[first_index] < max(1, LDA))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, LDB))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        if (ldc[first_index] < max(1, M[first_index]))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDC;
            }
            return;
        }
        /* particular case: the gemm degenerates to a no-op */
        if (M[first_index] == 0 || N[first_index] == 0 ||
            ((alpha[first_index] == (double)0.0 || K[first_index] == 0) &&
             beta[first_index] == (double)1.0))
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
        /* One independent gemm per iteration: safe to parallelize. */
#pragma omp parallel for
        for (int batch_iter_omp = 0; batch_iter_omp < batch_count;
             batch_iter_omp++)
        {
            /* Call to cblas_dgemm */
            cblas_dgemm(
                BblasColMajor,
                transA[first_index], transB[first_index],
                M[first_index], N[first_index], K[first_index],
                (alpha[first_index]),
                arrayA[batch_iter_omp], lda[first_index],
                arrayB[batch_iter_omp], ldb[first_index],
                (beta[first_index]),
                arrayC[batch_iter_omp], ldc[first_index]);
            /* Successful */
            info[batch_iter_omp] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    } else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Variable batch: every problem carries its own parameters, so each
           is validated (and computed) independently inside the parallel
           loop.  LDA/LDB are scratch per iteration, hence private. */
#pragma omp parallel for private(LDA, LDB)
        for (int batch_iter_omp = 0; batch_iter_omp < batch_count;
             batch_iter_omp++)
        {
            /* Check input arguments */
            if ((transA[batch_iter_omp] != BblasNoTrans) &&
                (transA[batch_iter_omp] != BblasTrans) &&
                (transA[batch_iter_omp] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANSA, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_TRANSA;
                continue;
            }
            if ((transB[batch_iter_omp] != BblasNoTrans) &&
                (transB[batch_iter_omp] != BblasTrans) &&
                (transB[batch_iter_omp] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANSB, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_TRANSB;
                continue;
            }
            if (transA[batch_iter_omp] == BblasNoTrans)
            {
                LDA = M[batch_iter_omp];
            } else
            {
                LDA = K[batch_iter_omp];
            }
            if (transB[batch_iter_omp] == BblasNoTrans)
            {
                LDB = K[batch_iter_omp];
            } else
            {
                LDB = N[batch_iter_omp];
            }
            if (M[batch_iter_omp] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter_omp] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_N;
                continue;
            }
            if (K[batch_iter_omp] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_K, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_K;
                continue;
            }
            if (lda[batch_iter_omp] < max(1, LDA))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_LDA;
                continue;
            }
            if (ldb[batch_iter_omp] < max(1, LDB))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_LDB;
                continue;
            }
            if (ldc[batch_iter_omp] < max(1, M[batch_iter_omp]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter_omp);
                info[batch_iter_omp] = BBLAS_ERR_LDC;
                continue;
            }
            /* particular case: the gemm degenerates to a no-op */
            if (M[batch_iter_omp] == 0 || N[batch_iter_omp] == 0 ||
                ((alpha[batch_iter_omp] == (double)0.0 ||
                  K[batch_iter_omp] == 0) &&
                 beta[batch_iter_omp] == (double)1.0))
            {
                info[batch_iter_omp] = BBLAS_SUCCESS;
                continue;
            }
            cblas_dgemm(
                BblasColMajor,
                transA[batch_iter_omp], transB[batch_iter_omp],
                M[batch_iter_omp], N[batch_iter_omp], K[batch_iter_omp],
                (alpha[batch_iter_omp]),
                arrayA[batch_iter_omp], lda[batch_iter_omp],
                arrayB[batch_iter_omp], ldb[batch_iter_omp],
                (beta[batch_iter_omp]),
                arrayC[batch_iter_omp], ldc[batch_iter_omp]);
            /* Successful */
            info[batch_iter_omp] = BBLAS_SUCCESS;
        }
    } else
    {
        /* Unknown batch_opts value. */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
scheduled_clauseModificado2.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif int main(int argc, char const *argv[]) { int i, n = 200, chunk, a[n], suma=0; if (argc < 2) { fprintf(stderr, "\nFalta iteraciones o chunk\n"); exit(-1); } n = atoi(argv[1]); if (n>200) n = 200; chunk = atoi(argv[2]); for (i = 0; i < n; i++) a[i] = i; printf("dyn-var: %d\n",omp_get_dynamic()); printf("nthreads-var: %d\n", omp_get_max_threads()); omp_sched_t schedule_type; int chunk_size; omp_get_schedule(&schedule_type, &chunk_size); printf("run-sched-var:\n"); if (schedule_type == omp_sched_static) printf("\tomp_sched_static\n"); else if (schedule_type == omp_sched_dynamic) printf("\tomp_sched_dynamic\n"); else if (schedule_type == omp_sched_guided) printf("\tomp_sched_guided\n"); else /*if (schedule_type == omp_sched_auto)*/ printf("\tomp_sched_auto\n"); printf("\tchunk: %d\n", chunk_size); int set_dyn; do { printf("Introduce dyn-var: "); scanf("%i",&set_dyn); omp_set_dynamic(set_dyn); } while(set_dyn > 1); int n_threads; printf("Introduce nthreads-var: "); scanf("%i",&n_threads); omp_set_num_threads(n_threads); char sched_t[20]; printf("Introduce schedule_type: "); scanf("%s",sched_t); printf("Introduce chunk_size: "); scanf("%i",&chunk_size); if (strcmp(sched_t, "omp_sched_static") == 0) schedule_type = omp_sched_static; else if (strcmp(sched_t, "omp_sched_dynamic") == 0) schedule_type = omp_sched_dynamic; else if (strcmp(sched_t, "omp_sched_guided") == 0) schedule_type = omp_sched_guided; else if (sched_t == "omp_sched_auto") schedule_type = omp_sched_auto; omp_set_schedule(schedule_type, chunk_size); #pragma omp parallel for firstprivate(suma)\ lastprivate(suma) schedule(dynamic, chunk) for (i=0; i<n; i++) { suma = suma+a[i]; printf("thread %d suma a[%d] suma=%d\n", omp_get_thread_num(), i, suma); } printf("Fuera de 'parallel for' suma = %d\n", suma); printf("dyn-var: %d\n",omp_get_dynamic()); printf("nthreads-var: %d\n", 
omp_get_max_threads()); omp_get_schedule(&schedule_type, &chunk_size); printf("run-sched-var:\n"); if (schedule_type == omp_sched_static) printf("\tomp_sched_static\n"); else if (schedule_type == omp_sched_dynamic) printf("\tomp_sched_dynamic\n"); else if (schedule_type == omp_sched_guided) printf("\tomp_sched_guided\n"); else /*if (schedule_type == omp_sched_auto)*/ printf("\tomp_sched_auto\n"); printf("\tchunk: %d\n", chunk_size); return 0; }
hybrid_whereami.c
/* Program hybrid_whereami reports the mask for each OMP thread for each MPI process, and works for nsec seconds (10). This allows one to inspect occupation through utilities like top (e.g. execute top, then hit the 1 key). Uses maskeraid utilities github.com/TACC/maskeraid mpi_report_mask(): in pure MPI region to report MPI process masks hybrid_report_mask(): in OpenMP parallel region to report thread masks map_to_cpuid( cpuid ): sets thread affinity to cpu_id (see /proc/cpuinfo, or hwloc) load_cpu_nsec(nsec): loads the cpu for nsec (default 10) hybrid_whereami.c is a driver for: 1.) Get line arguments (optional): help or number of seconds for load 2.) Start MPI Affinity for MPI processes can be reset here. mpi_report_mask() reports MPI process masks 3.) Start OpenMP parallel region hybrid_report_mask() reports masks for each thread of each MPI process. 4.) Set a work load on each thread 5.) Finish parallel region 6.) Stop MPI Kent Milfeld 12/16/15 Update to separate require a single call for OpenMP hybrid. Uses multi-threaded MPI initialization Kent Milfeld 2015/07/13 */ #include <stdio.h> #include <omp.h> #include <mpi.h> #include <unistd.h> #include <stdlib.h> #include "opts.h" void load_cpu_nsec(int nsec); void hybrid_report_mask(void); int map_to_cpuid( int icore); void mpi_report_mask(void); int main(int argc, char **argv){ int rank, nranks; // MPI variables. 
int nthrds, thrd, cpuid; //Thread info int requested=MPI_THREAD_MULTIPLE, provided; int nsec = 10; // Load, default time int ierr; // Error number // cmdln_get_nsec_or_help( &nsec, argc, argv); //optional, get nsec from cmd line Maskopts opts(argc,argv); // thread safe init replaces MPI_Init(&argc, &argv); MPI_Init_thread(&argc, &argv, requested, &provided); MPI_Comm_size(MPI_COMM_WORLD, &nranks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); mpi_report_mask(); // Report JUST MPI process masks #pragma omp parallel private(thrd,nthrds,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); // cpuid = thrd; // set cpuid to thread number (thrd) // ierr = map_to_cpuid( cpuid ); // set your own affinity here hybrid_report_mask(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } MPI_Finalize(); }
EmbeddingBag.h
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.) ******************************************************************************/ #if defined(USE_LIBXSMM_JIT) #include <libxsmm.h> #endif #include "utils.h" #include "rtm.h" template <typename T> class EmbeddingBagImpl { public: EmbeddingBagImpl(long M, long E) : M(M), E(E) { #ifdef USE_LIBXSMM_JIT libxsmm_meltw_unary_shape unary_shape_f32 = libxsmm_create_meltw_unary_shape( E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 ); libxsmm_meltw_unary_shape unary_shape_f16 = libxsmm_create_meltw_unary_shape( E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 ); libxsmm_meltw_binary_shape binary_shape_f32 = libxsmm_create_meltw_binary_shape( E, 1, &_ld, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 ); weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment); _ld = E; if (sizeof(T) == 4) { kernel = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX, unary_shape_f32, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES ); } else { kernel = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX, unary_shape_f16, (sizeof(long) == 8) ? 
LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES ); } kernel1 = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REPLICATE_COL_VAR, unary_shape_f32, LIBXSMM_MELTW_FLAG_UNARY_NONE ); kernel2 = libxsmm_dispatch_meltw_binary_v2( LIBXSMM_MELTW_TYPE_BINARY_MULADD, binary_shape_f32, LIBXSMM_MELTW_FLAG_BINARY_BCAST_SCALAR_IN_0 ); #endif } ~EmbeddingBagImpl() { my_free(weight_); weight_ = 0; } void init(T low = -0.1, T high = 0.1) { init_random(M * E, weight_, low, high); } #ifdef USE_LIBXSMM_JIT void forward(long N, long NS, const long *offsets, const long *indices, T *output_) { T(*__restrict weight)[E] = (T(*)[*])weight_; T(*__restrict output)[E] = (T(*)[*])output_; #pragma omp parallel for for (int n = 0; n < N; n++) { libxsmm_meltw_unary_param params; auto start = offsets[n]; auto end = (n < N - 1 ? offsets[n + 1] : NS); unsigned long long __n = end-start; params.in.primary = weight; params.in.secondary = (void*)&indices[start]; params.in.tertiary = &__n; params.out.primary = &output[n][0]; kernel( &params ); } } #else void forward(long N, long NS, const long *offsets, const long *indices, T *output_) { T(*__restrict weight)[E] = (T(*)[*])weight_; T(*__restrict output)[E] = (T(*)[*])output_; #pragma omp parallel for for (long n = 0; n < N; n++) { auto start = offsets[n]; auto end = (n < N - 1 ? offsets[n + 1] : NS); #pragma omp simd for (long v = 0; v < E; v++) output[n][v] = 0; for (long s = start; s < end; s++) { auto ind = indices[s]; #pragma omp simd for (long v = 0; v < E; v++) { output[n][v] += weight[ind][v]; } } } } #endif #ifdef USE_LIBXSMM_JIT void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_) { T(*__restrict gradout)[E] = (T(*)[*])gradout_; T(*__restrict values)[E] = (T(*)[*])values_; int _ld = E; #pragma omp parallel for for (long n = 0; n < N; n++) { libxsmm_meltw_unary_param unary_param; auto start = offsets[n]; auto end = (n < N - 1 ? 
offsets[n + 1] : NS); unsigned long long _N = end-start; unary_param.in.primary = (void*)&gradout[n][0]; unary_param.out.primary = (void*)&values[start][0]; unary_param.op.primary = (void*)&_N; kernel1(&unary_param); } } #else void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_) { T(*__restrict gradout)[E] = (T(*)[*])gradout_; T(*__restrict values)[E] = (T(*)[*])values_; #pragma omp parallel for for (long n = 0; n < N; n++) { auto start = offsets[n]; auto end = (n < N - 1 ? offsets[n + 1] : NS); for (long s = start; s < end; s++) { #pragma omp simd #ifdef STREAMING_WRITES #pragma vector nontemporal(values) #endif for (long v = 0; v < E; v++) values[s][v] = gradout[n][v]; } } } #endif #ifdef USE_LIBXSMM_JIT void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm) { int use_lock_free = use_rtm == 0 ? 1: 0; T(*__restrict weight)[E] = (T(*)[*])weight_; T(*__restrict grads)[E] = (T(*)[*])grads_; int _ld = E; if(use_lock_free) { /*printf("Using lock free update\n");*/ int max_thr = omp_get_max_threads(); if(M < max_thr) max_thr = M; #pragma omp parallel num_threads(max_thr) { int tid = omp_get_thread_num(); for(long i = 0; i < NS; i++) { auto ind = indices[i]; if(ind % max_thr == tid) { libxsmm_meltw_binary_param binary_param; binary_param.in0.primary = (void*)&lr; binary_param.in1.primary = (void*)&grads[i][0]; binary_param.out.primary = (void*)&weight[ind][0]; { kernel2(&binary_param); } } } } } else { SimpleSpinLock fallBackLock; #pragma omp parallel for for (long i = 0; i < NS; i++) { libxsmm_meltw_binary_param binary_param; long ind = indices[i]; binary_param.in0.primary = (void*)&lr; binary_param.in1.primary = (void*)&grads[i][0]; binary_param.out.primary = (void*)&weight[ind][0]; { TransactionScope guard(fallBackLock, 100, 0); kernel2(&binary_param); } } } } #else void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm) { T(*__restrict weight)[E] = 
(T(*)[*])weight_; T(*__restrict grads)[E] = (T(*)[*])grads_; int use_lock_free = use_rtm == 0 ? 1: 0; if(use_lock_free) { int max_thr = omp_get_max_threads(); if(M < max_thr) max_thr = M; #pragma omp parallel num_threads(max_thr) { int tid = omp_get_thread_num(); for(long i = 0; i < NS; i++) { auto ind = indices[i]; if(ind % max_thr == tid) { #pragma omp simd for (long v = 0; v < E; v++) weight[ind][v] += lr * grads[i][v]; } } } } else { SimpleSpinLock fallBackLock; #pragma omp parallel for for (long i = 0; i < NS; i++) { long ind = indices[i]; { TransactionScope guard(fallBackLock, 100, 0); #pragma omp simd for (long v = 0; v < E; v++) weight[ind][v] += lr * grads[i][v]; } } } } #endif T *weight_; long M; long E; #ifdef USE_LIBXSMM_JIT int _ld; libxsmm_meltwfunction_unary kernel; libxsmm_meltwfunction_unary kernel1; libxsmm_meltwfunction_binary kernel2; #endif };
Cmfd.h
/** * @file Cmfd.h * @brief The Cmfd class. * @date October 14, 2013 * @author Sam Shaner, MIT, Course 22 (shaner@mit.edu) */ #ifndef CMFD_H_ #define CMFD_H_ #ifdef __cplusplus #define _USE_MATH_DEFINES #ifdef SWIG #include "Python.h" #endif #include "log.h" #include "constants.h" #include "Universe.h" #include "Track.h" #include "Track3D.h" #include "Quadrature.h" #include "linalg.h" #include "Geometry.h" #include "Timer.h" #endif /** Optimization macro for 3D calculations to avoid branch statements */ #ifdef THREED #define _SOLVE_3D (true) #endif /** Forward declaration of Geometry class */ class Geometry; /** Comparator for sorting k-nearest stencil std::pair objects */ inline bool stencilCompare(const std::pair<int, double>& firstElem, const std::pair<int, double>& secondElem) { return firstElem.second < secondElem.second; } #undef track_flux /** Indexing macro for the angular fluxes for each polar angle and energy * group for either the forward or reverse direction for a given Track */ #define track_flux(p,e) (track_flux[(p)*_num_moc_groups + (e)] /** * @class Cmfd Cmfd.h "src/Cmfd.h" * @brief A class for Coarse Mesh Finite Difference (CMFD) acceleration. 
 */
class Cmfd {

private:

  /** Pointer to polar quadrature object */
  Quadrature* _quadrature;

  /** Pointer to geometry object */
  Geometry* _geometry;

  /** The keff eigenvalue */
  double _k_eff;

  /** The A (destruction) matrix */
  Matrix* _A;

  /** The M (production) matrix */
  Matrix* _M;

  /** The old source vector */
  Vector* _old_source;

  /** The new source vector */
  Vector* _new_source;

  /* Domain boundary communication buffers (exchanged between neighboring
   * domains when running with domain decomposition) */
  CMFD_PRECISION*** _boundary_volumes;
  CMFD_PRECISION*** _boundary_reaction;
  CMFD_PRECISION*** _boundary_diffusion;
  CMFD_PRECISION*** _old_boundary_flux;
  CMFD_PRECISION*** _boundary_surface_currents;
  CMFD_PRECISION*** _send_volumes;
  CMFD_PRECISION*** _send_reaction;
  CMFD_PRECISION*** _send_diffusion;
  CMFD_PRECISION*** _send_currents;
  CMFD_PRECISION* _send_split_current_data;
  CMFD_PRECISION* _receive_split_current_data;
  CMFD_PRECISION** _send_split_currents_array;
  CMFD_PRECISION** _receive_split_currents_array;
  CMFD_PRECISION*** _off_domain_split_currents;
  CMFD_PRECISION*** _received_split_currents;

  /** Vector representing the flux for each cmfd cell and cmfd energy group at
   *  the end of a CMFD solve */
  Vector* _new_flux;

  /** Vector representing the flux for each cmfd cell and cmfd energy group at
   *  the beginning of a CMFD solve */
  Vector* _old_flux;

  /** The corrected diffusion coefficients from the previous iteration */
  Vector* _old_dif_surf_corr;

  /** Whether the old diffusion coefficient has been set */
  bool _old_dif_surf_valid;

  /** Gauss-Seidel SOR relaxation factor */
  double _SOR_factor;

  /** cmfd source convergence threshold */
  double _source_convergence_threshold;

  /** Number of cells in x direction */
  int _num_x;

  /** Number of cells in y direction */
  int _num_y;

  /** Number of cells in z direction */
  int _num_z;

  /** Sweep number on MOC side */
  int _moc_iteration;

  /** Number of energy groups */
  int _num_moc_groups;

  /** Number of polar angles */
  int _num_polar;

  /** Number of azimuthal angles */
  int _num_azim;

  /** Number of energy groups used in cmfd solver. Note that cmfd supports
   *  energy condensation from the MOC */
  int _num_cmfd_groups;

  /** Coarse energy indices for fine energy groups */
  int* _group_indices;

  /** Map of MOC groups to CMFD groups */
  int* _group_indices_map;

  /** Number of energy groups in the backup CMFD solver */
  int _num_backup_groups;

  /** Map of MOC groups to backup CMFD group structure */
  std::vector< std::vector<int> > _backup_group_structure;

  /** Map of CMFD groups to backup CMFD group structure */
  int* _cmfd_group_to_backup_group;

  /** If the user specified fine-to-coarse group indices */
  bool _user_group_indices;

  /** If a linear source approximation is used */
  bool _linear_source;

  /** If diffusion coefficients are limited by the flux */
  bool _flux_limiting;

  /** Whether to rebalance the computed sigma-t to be consistent with the MOC
   *  solution on every sweep */
  bool _balance_sigma_t;

  /** Number of FSRs */
  long _num_FSRs;

  /** The volumes (areas) for each FSR */
  FP_PRECISION* _FSR_volumes;

  /** Pointers to Materials for each FSR */
  Material** _FSR_materials;

  /** The FSR scalar flux in each energy group */
  FP_PRECISION* _FSR_fluxes;

  /** The FSR source in each energy group */
  FP_PRECISION* _FSR_sources;

  /** The source region flux moments (x, y, and z) for each energy group */
  FP_PRECISION* _flux_moments;

  /** Array of CMFD cell volumes */
  Vector* _volumes;

  /** Array of material pointers for CMFD cell materials */
  Material** _materials;

  /** Physical dimensions of the geometry and each CMFD cell */
  double _width_x;
  double _width_y;
  double _width_z;
  double _cell_width_x;
  double _cell_width_y;
  double _cell_width_z;

  /** Physical dimensions of non-uniform CMFD meshes (for whole geometry) */
  std::vector<double> _cell_widths_x;
  std::vector<double> _cell_widths_y;
  std::vector<double> _cell_widths_z;

  /** Distance of each mesh from the left-lower-bottom most point */
  std::vector<double> _accumulate_x;
  std::vector<double> _accumulate_y;
  std::vector<double> _accumulate_z;

  /** True if the cmfd meshes are non-uniform */
  bool _non_uniform;

  /** True if the cmfd mesh has been adjusted to fit the domain
   *  decomposition */
  bool _widths_adjusted_for_domains;

  /** Array of geometry boundaries */
  boundaryType* _boundaries;

  /** Array of surface currents for each CMFD cell */
  Vector* _surface_currents;

  /** Array of total current from starting boundary fluxes */
  Vector* _starting_currents;

  /** Array of net currents of all CMFD cells */
  Vector* _net_currents;

  /** Array of surface currents on all faces + edges and corners used in
   *  debugging */
  Vector* _full_surface_currents;

  /** Array of surface currents on edges and corners for each CMFD cell */
  std::map<int, CMFD_PRECISION> _edge_corner_currents;

  /** Vector of vectors of FSRs contained in each cell */
  std::vector< std::vector<long> > _cell_fsrs;

  /** Pointer to Lattice object representing the CMFD mesh */
  Lattice* _lattice;

  /** Flag indicating whether to update the MOC flux */
  bool _flux_update_on;

  /** Flag indicating whether to use centroid updating */
  bool _centroid_update_on;

  /** Flag indicating whether to check neutron balance on every CMFD solve */
  bool _check_neutron_balance;

  /** Whether to allow the CMFD solver to work with / return negative
   *  fluxes */
  bool _negative_fluxes_allowed;

  /** Number of MOC iterations before the CMFD update ratios are limited */
  int _num_unbounded_iterations;

  /** Number of cells to use in updating MOC flux */
  int _k_nearest;

  /** Relaxation factor to use for corrected diffusion coefficients */
  double _relaxation_factor;

  /** Map storing the k-nearest stencil for each fsr */
  std::map<long, std::vector< std::pair<int, double> > > _k_nearest_stencils;

  /** OpenMP mutual exclusion locks for atomic CMFD cell operations */
  omp_lock_t* _cell_locks;

  /** OpenMP mutual exclusion lock for edge/corner current tallies */
  omp_lock_t _edge_corner_lock;

#ifndef THREED
  /** Flag indicating whether the problem is 2D or 3D (a macro constant
   *  when THREED is defined — see top of file) */
  bool _SOLVE_3D;
#endif

  /** Array of azimuthal track spacings */
  double* _azim_spacings;

  /** 2D array of polar track spacings */
  double** _polar_spacings;

  /** Whether to use axial interpolation for flux update ratios */
  int _use_axial_interpolation;

  /** Axial interpolation constants */
  std::vector<double*> _axial_interpolants;

  /* Structure to contain information about the convergence of the CMFD */
  ConvergenceData* _convergence_data;

  /* MPI communicator to transfer buffers, mainly currents at interfaces */
  DomainCommunicator* _domain_communicator;

  /* Buffer to contain received data */
  CMFD_PRECISION* _inter_domain_data;

  /* Buffer to contain sent data from domain */
  CMFD_PRECISION* _send_domain_data;

  /* For each face (1st dimension of the array), will contain data received */
  CMFD_PRECISION** _domain_data_by_surface;

  /* For each face (1st dimension of the array), will contain data to send */
  CMFD_PRECISION** _send_data_by_surface;

  /* Map of the indexes to each boundary in the tally arrays */
  std::vector<std::map<int, int> > _boundary_index_map;

  /* The number of on-domain cells in the x-direction */
  int _local_num_x;

  /* The number of on-domain cells in the y-direction */
  int _local_num_y;

  /* The number of on-domain cells in the z-direction */
  int _local_num_z;

  /* Cumulative on-domain cell counts, indexed by domain index */
  std::vector<int> _accumulate_lmx;
  std::vector<int> _accumulate_lmy;
  std::vector<int> _accumulate_lmz;

  /* Size of _tally_memory array */
  long _total_tally_size;

  /* 1D array that contains all tallies (diffusion, reaction and volume) */
  CMFD_PRECISION* _tally_memory;

  /* 2D array that contains reaction rates in each cell and group */
  CMFD_PRECISION** _reaction_tally;

  /* 2D array that contains volume tallies of each cell */
  CMFD_PRECISION** _volume_tally;

  /* 2D array that contains diffusion tallies for each cell and groups */
  CMFD_PRECISION** _diffusion_tally;

  /* Boolean to check if tallies are allocated */
  bool _tallies_allocated;

  /* Boolean to check if the domain communicator (for domain decomposed CMFD)
   * has been allocated */
  bool _domain_communicator_allocated;

  /** A timer to record timing data for a simulation */
  Timer* _timer;

  /** A one-group backup CMFD solver */
  Cmfd* _backup_cmfd;

  /* Private worker functions */
  CMFD_PRECISION computeLarsensEDCFactor(CMFD_PRECISION dif_coef,
                                         CMFD_PRECISION delta);
  void constructMatrices();
  void collapseXS();
  void updateMOCFlux();
  void rescaleFlux();
  void splitVertexCurrents();
  void splitEdgeCurrents();
  void getVertexSplitSurfaces(int cell, int vertex,
                              std::vector<int>* surfaces);
  void getEdgeSplitSurfaces(int cell, int edge, std::vector<int>* surfaces);
  void initializeMaterials();
  void initializeCurrents();
  void generateKNearestStencils();
  int convertDirectionToSurface(int* direction);
  void convertSurfaceToDirection(int surface, int* direction);
  std::string getSurfaceNameFromDirection(int* direction);
  std::string getSurfaceNameFromSurface(int surface);

  /* Private getter functions */
  int getCellNext(int cell_id, int surface_id, bool global=true,
                  bool neighbor=false);
  int getCellByStencil(int cell_id, int stencil_id);
  CMFD_PRECISION getFluxRatio(int cell_id, int group, long fsr);
  CMFD_PRECISION getUpdateRatio(int cell_id, int moc_group, long fsr);
  double getDistanceToCentroid(Point* centroid, int cell_id,
                               int local_cell_id, int stencil_index);
  void getSurfaceDiffusionCoefficient(int cmfd_cell, int surface, int group,
                                      CMFD_PRECISION& dif_surf,
                                      CMFD_PRECISION& dif_surf_corr);
  CMFD_PRECISION getDiffusionCoefficient(int cmfd_cell, int group);
  CMFD_PRECISION getSurfaceWidth(int surface, int global_ind);
  CMFD_PRECISION getPerpendicularSurfaceWidth(int surface, int global_ind);
  int getSense(int surface);
  int getLocalCMFDCell(int cmfd_cell); //TODO: optimize, document
  int getGlobalCMFDCell(int cmfd_cell); //TODO: optimize, document
  int getCellColor(int cmfd_cell); //TODO: optimize, document
  void packBuffers();
#ifdef MPIx
  void ghostCellExchange();
  void communicateSplits(bool faces);
#endif
  void unpackSplitCurrents(bool faces);
  void copyFullSurfaceCurrents();
  void checkNeutronBalance(bool pre_split=true, bool old_source=false);
  void printProlongationFactors();

public:

  Cmfd();
  virtual ~Cmfd();

  /* Worker functions */
  double computeKeff(int moc_iteration);
  void initialize();
  void initializeCellMap();
  void initializeGroupMap();
  void allocateTallies();
  void initializeLattice(Point* offset, bool is_2D=false);
  void initializeBackupCmfdSolver();
  void copyCurrentsToBackup();
  int findCmfdCell(LocalCoords* coords);
  int findCmfdSurface(int cell_id, LocalCoords* coords);
  int findCmfdSurfaceOTF(int cell_id, double z, int surface_2D);
  void addFSRToCell(int cell_id, long fsr_id);
  void zeroCurrents();
  void tallyCurrent(segment* curr_segment, float* track_flux,
                    int azim_index, int polar_index, bool fwd);
  void tallyStartingCurrent(Point* point, double delta_x, double delta_y,
                            double delta_z, float* track_flux, double weight);
  void recordNetCurrents();
  void printInputParamsSummary();
  void printTimerReport();
  void checkBalance();

  /* Get parameters */
  int getNumCmfdGroups();
  int getNumMOCGroups();
  int getNumCells();
  int getCmfdGroup(int group);
  int getBoundary(int side);
  Lattice* getLattice();
  int getNumX();
  int getNumY();
  int getNumZ();
  Vector* getLocalCurrents();
  CMFD_PRECISION*** getBoundarySurfaceCurrents();
  int convertFSRIdToCmfdCell(long fsr_id);
  int convertGlobalFSRIdToCmfdCell(long global_fsr_id);
  std::vector< std::vector<long> >* getCellFSRs();
  bool isFluxUpdateOn();
  bool isCentroidUpdateOn();
  bool isSigmaTRebalanceOn();

  /* Set parameters */
  void setSORRelaxationFactor(double SOR_factor);
  void setCMFDRelaxationFactor(double relaxation_factor);
  void setGeometry(Geometry* geometry);
  void setWidthX(double width);
  void setWidthY(double width);
  void setWidthZ(double width);
  void setNumX(int num_x);
  void setNumY(int num_y);
  void setNumZ(int num_z);
  void setNumFSRs(long num_fsrs);
  void setNumMOCGroups(int num_moc_groups);
  void setBoundary(int side, boundaryType boundary);
  void setLatticeStructure(int num_x, int num_y, int num_z=1);
  void setFluxUpdateOn(bool flux_update_on);
  void setCentroidUpdateOn(bool centroid_update_on);
  void setGroupStructure(std::vector< std::vector<int> > group_indices);
  void setSourceConvergenceThreshold(double source_thresh);
  void setQuadrature(Quadrature* quadrature);
  void setNumUnboundedIterations(int unbounded_iterations);
  void setKNearest(int k_nearest);
  void setSolve3D(bool solve_3d);
  void setAzimSpacings(const std::vector<double>& azim_spacings,
                       int num_azim);
  void setPolarSpacings(const std::vector< std::vector<double> >&
                        polar_spacings, int num_azim, int num_polar);
  void setKeff(double k_eff);
  void setBackupGroupStructure(std::vector< std::vector<int> > group_indices);
#ifdef MPIx
  void setNumDomains(int num_x, int num_y, int num_z);
  void setDomainIndexes(int idx_x, int idx_y, int idx_z);
#endif
  void setConvergenceData(ConvergenceData* convergence_data);
  void useAxialInterpolation(int interpolate);

  /* Methods to try to fix stability issues */
  void useFluxLimiting(bool flux_limiting);
  void enforceBalanceOnDiagonal(int cmfd_cell, int group);
  void rebalanceSigmaT(bool balance_sigma_t);

  /* Set FSR parameters */
  void setFSRMaterials(Material** FSR_materials);
  void setFSRVolumes(FP_PRECISION* FSR_volumes);
  void setFSRFluxes(FP_PRECISION* scalar_flux);
  void setFSRSources(FP_PRECISION* sources);
  void setCellFSRs(std::vector< std::vector<long> >* cell_fsrs);
  void setFluxMoments(FP_PRECISION* flux_moments);

  /* Set XYZ widths of non-uniform meshes */
  void setWidths(std::vector< std::vector<double> > widths);

  /* For debug use */
  void printCmfdCellSizes();

  /* For printing information about the CMFD object */
  std::string toString();
};


/**
 * @brief Get the CMFD group given an MOC group.
 * @param group the MOC energy group
 * @return the CMFD energy group
 */
inline int Cmfd::getCmfdGroup(int group) {
  return _group_indices_map[group];
}


/**
 * @brief Quickly finds a 3D CMFD surface given a cell, global coordinate, and
 *        2D CMFD surface. Intended for use in axial on-the-fly ray tracing.
 * @details If the coords is not on a surface, -1 is returned.
If there is * no 2D CMFD surface intersection, -1 should be input for the 2D CMFD * surface. * @param cell_id The CMFD cell ID that the local coords is in. * @param z the axial height in the root universe of the point being evaluated. * @param surface_2D The ID of the 2D CMFD surface that the LocalCoords object * intersects. If there is no 2D intersection, -1 should be input. */ inline int Cmfd::findCmfdSurfaceOTF(int cell_id, double z, int surface_2D) { int global_cell_id = getGlobalCMFDCell(cell_id); return _lattice->getLatticeSurfaceOTF(global_cell_id, z, surface_2D); } /** * @brief Converts a local CMFD cell ID into its global ID * @param cmfd_cell The local CMFD cell ID * @return The global CMFD cell ID */ inline int Cmfd::getGlobalCMFDCell(int cmfd_cell) { int x_start = 0; int y_start = 0; int z_start = 0; if (_domain_communicator != NULL) { x_start = _accumulate_lmx[_domain_communicator->_domain_idx_x]; y_start = _accumulate_lmy[_domain_communicator->_domain_idx_y]; z_start = _accumulate_lmz[_domain_communicator->_domain_idx_z]; } int ix = cmfd_cell % _local_num_x; int iy = (cmfd_cell % (_local_num_x * _local_num_y)) / _local_num_x; int iz = cmfd_cell / (_local_num_x * _local_num_y); return ((iz + z_start) * _num_y + iy + y_start) * _num_x + ix + x_start; } /** * @brief Tallies the current contribution from this segment across the * the appropriate CMFD mesh cell surface. 
 * @param curr_segment the current Track segment
 * @param track_flux the outgoing angular flux for this segment
 * @param azim_index azimuthal index of track angle
 * @param polar_index polar index of track angle
 * @param fwd boolean indicating direction of integration along segment
 */
inline void Cmfd::tallyCurrent(segment* curr_segment, float* track_flux,
                               int azim_index, int polar_index, bool fwd) {

  int surf_id, cell_id, cmfd_group;
  int ncg = _num_cmfd_groups;

  /* Check if the current needs to be tallied: the segment must end on a
   * CMFD surface in the direction of integration. The encoded value packs
   * both the cell ID and the surface ID (cell * NUM_SURFACES + surface). */
  bool tally_current = false;
  if (curr_segment->_cmfd_surface_fwd != -1 && fwd) {
    surf_id = curr_segment->_cmfd_surface_fwd % NUM_SURFACES;
    cell_id = curr_segment->_cmfd_surface_fwd / NUM_SURFACES;
    tally_current = true;
  }
  else if (curr_segment->_cmfd_surface_bwd != -1 && !fwd) {
    surf_id = curr_segment->_cmfd_surface_bwd % NUM_SURFACES;
    cell_id = curr_segment->_cmfd_surface_bwd / NUM_SURFACES;
    tally_current = true;
  }

  /* Tally current if necessary */
  if (tally_current) {

    /* Per-call scratch array of condensed group currents (VLA, aligned
     * so the simd loops below can use aligned accesses) */
    CMFD_PRECISION currents[_num_cmfd_groups]
         __attribute__ ((aligned(VEC_ALIGNMENT)));
    memset(currents, 0, _num_cmfd_groups * sizeof(CMFD_PRECISION));

    int local_cell_id = getLocalCMFDCell(cell_id);
    if (_SOLVE_3D) {
      /* 3D: one angular flux per MOC group; condense to CMFD groups,
       * then scale once by the quadrature weight */
      double wgt = _quadrature->getWeightInline(azim_index, polar_index);
      for (int e=0; e < _num_moc_groups; e++) {

        /* Get the CMFD group */
        cmfd_group = getCmfdGroup(e);

        /* Increment the surface group current */
        currents[cmfd_group] += track_flux[e];
      }
#pragma omp simd aligned(currents)
      for (int g=0; g < ncg; g++)
        currents[g] *= wgt;

      /* Increment currents on faces (atomic inside incrementValues) */
      if (surf_id < NUM_FACES) {
        _surface_currents->incrementValues
            (local_cell_id, surf_id*ncg, (surf_id+1)*ncg - 1, currents);
      }
      /* Increment currents on corners and edges; the std::map tally is
       * not thread-safe, so guard it with the shared lock */
      else {
        int first_ind = (local_cell_id * NUM_SURFACES + surf_id) * ncg;
        omp_set_lock(&_edge_corner_lock);
#pragma omp simd aligned(currents)
        for (int g=0; g < ncg; g++)
          _edge_corner_currents[first_ind+g] += currents[g];
        omp_unset_lock(&_edge_corner_lock);
#ifdef INTEL
#pragma omp flush
#endif
      }
    }
    else {
      /* 2D: angular flux is stored per (polar, group) pair; fold in the
       * per-polar quadrature weight while condensing */
      int pe = 0;
      for (int p=0; p < _num_polar/2; p++) {
        for (int e=0; e < _num_moc_groups; e++) {

          /* Get the CMFD group */
          cmfd_group = getCmfdGroup(e);

          currents[cmfd_group] += track_flux[pe]
              * _quadrature->getWeightInline(azim_index, p);
          pe++;
        }
      }

      /* Increment currents on face */
      if (surf_id < NUM_FACES) {
        _surface_currents->incrementValues
            (local_cell_id, surf_id*ncg, (surf_id+1)*ncg - 1, currents);
      }
      else {
        omp_set_lock(&_edge_corner_lock);
        int first_ind = (local_cell_id * NUM_SURFACES + surf_id) * ncg;

        /* Add contribution to corner current */
#pragma omp simd aligned(currents)
        for (int g=0; g < ncg; g++)
          _edge_corner_currents[first_ind+g] += currents[g];
        omp_unset_lock(&_edge_corner_lock);
#ifdef INTEL
#pragma omp flush
#endif
      }
    }
  }
}

#endif /* CMFD_H_ */
convolution_1x1_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Quantize a float to int8: round to nearest, then saturate to the
// symmetric range [-127, 127] (the value -128 is never produced).
static inline signed char float2int8(float v)
{
    int int32 = round(v);
    if (int32 > 127) return 127;
    if (int32 < -127) return -127;
    return (signed char)int32;
}

#if __aarch64__
#if 1
#include "gemm_symm_int8.h"

// Repack the 1x1 convolution weights (outch rows x inch cols) into the "A"
// operand layout used by the symmetric int8 GEMM (see gemm_symm_int8.h).
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(outch, inch, (size_t)1u);
    const int8_t* a = _kernel;
    int8_t* sa = kernel_tm;
    reorder_a((int8_t*)a, sa, outch, inch, inch);
}

// 1x1 stride-1 convolution expressed as an int8 GEMM:
//   C(m x n) = A(m x k) * B(k x n)
// with m = output channels, k = input channels, n = w*h spatial size.
// top_blob receives the raw int32 accumulators (no requantization:
// scales/bias arguments of int8kernel are passed as 0).
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    const size_t n = bottom_blob.w * bottom_blob.h;
    const size_t k = bottom_blob.c;
    const size_t m = top_blob.c;

    // repack the input feature maps into the GEMM "B" operand layout
    ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
    {
        const int8_t* pData = bottom_blob;
        int8_t* pReorder = bottom_tm;
        reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
    }

    // GEMM
    int32_t* pc = top_blob;
    const int8_t* pa = kernel;
    const int8_t* pb = bottom_tm;
    const size_t ldc = top_blob.cstep;
    int8kernel((void*)pc, pa, pb, m, k, n, ldc, 0, 0, opt);
}

// Same GEMM path as conv1x1s1_sgemm_int8_neon, but passes per-output-channel
// scales and a (pre-scaled) bias so int8kernel writes requantized int8
// output (note pc is int8_t* here vs int32_t* above).
// scales_requant holds interleaved pairs: [scale_in(0), scale_out(0),
// scale_in(1), scale_out(1), ...] per output channel.
static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob,
                                              Mat& top_blob, const Mat& kernel, const Mat& _bias,
                                              std::vector<float> scales_requant, const Option& opt)
{
    const size_t n = bottom_blob.w * bottom_blob.h;
    const size_t k = bottom_blob.c;
    const size_t m = top_blob.c;

    ncnn::Mat scales_tm(m);
    ncnn::Mat bias_tm(m);
    float* scales = scales_tm;
    const float* bias = _bias;

    // outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
    // the equation could convert to:
    // out = float2int8( (float)sum * (scale_requant_in * scale_requant_out) + (bias * scale_requant_out) )
    // prebuild the list of (scales_requant_in*scale_requant_out)
    for (size_t i = 0; i < m; ++i)
    {
        scales_tm[i] = scales_requant[2 * i] * scales_requant[2 * i + 1];
    }
    // pre-scale the bias by scale_requant_out so the kernel only needs a
    // single multiply-add per output (see the equation above)
    if (!_bias.empty())
    {
        for (size_t i = 0; i < m; ++i)
        {
            bias_tm[i] = bias[i] * scales_requant[2 * i + 1];
        }
        bias = bias_tm;
    }

    // repack the input feature maps into the GEMM "B" operand layout
    ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
    {
        const int8_t* pData = bottom_blob;
        int8_t* pReorder = bottom_tm;
        reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
    }

    // GEMM (non-null scales/bias select the requantizing output path)
    int8_t* pc = top_blob;
    const int8_t* pa = kernel;
    const int8_t* pb = bottom_tm;
    const size_t ldc = top_blob.cstep;
    int8kernel((void*)pc, pa, pb, m, k, n, ldc, scales, (float*)bias, opt);
}
#else

// Hand-written fallback path: repack the 1x1 weights into 4x4 interleaved
// tiles for the sgemm kernels below. Groups of 4 output channels are
// interleaved two input channels at a time; leftover output channels are
// packed singly at the tail of the kernel_tm channels.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    // kernel memory packed 4 x 4
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);

    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    // main loop: blocks of 4 output channels
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        const signed char* k0 = kernel + (p + 0) * inch;
        const signed char* k1 = kernel + (p + 1) * inch;
        const signed char* k2 = kernel + (p + 2) * inch;
        const signed char* k3 = kernel + (p + 3) * inch;

        signed char* ktmp = kernel_tm.channel(p / 4);

        int q = 0;
        // interleave pairs of input channels: k0[0..1], k1[0..1], k2[0..1], k3[0..1]
        for (; q + 1 < inch; q += 2)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k0[1];
            ktmp[2] = k1[0];
            ktmp[3] = k1[1];
ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch; signed char* ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; // bottom_tm memory packed 4 x 4 ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_blob.channel(0); const signed char* img1 = bottom_blob.channel(1); img0 += i; img1 += i; signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img0[1]; tmpptr[3] = img1[1]; tmpptr[4] = img0[2]; tmpptr[5] = img1[2]; tmpptr[6] = img0[3]; tmpptr[7] = img1[3]; tmpptr += 8; img0 += bottom_blob.cstep; img0 += bottom_blob.cstep; img1 += bottom_blob.cstep; img1 += bottom_blob.cstep; } for (; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); for (int q = 
0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; for (; i + 3 < size; i += 4) { signed char* tmpptr = bottom_tm.channel(i / 4); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" // for (; k+3<L; k=k+4) "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // for (; k+1<L; k=k+2) // remain loop "and w4, %w12, #3 \n" // w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n" // r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: 
\n" // for (; k+1<L; k=k+2) "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" // realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n" // w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" "5: \n" "st1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", 
"v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr += 8; kptr += 8; } for (; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; 
outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i < size; i++) { signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4); #if 0 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q=0; for (; q+3<inch; q=q+4) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3] int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3] tmpptr += 4; kptr += 16; } for (; q+1<inch; q=q+2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q<inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k[0-3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } vst1q_lane_s32(outptr0, _sum, 0); vst1q_lane_s32(outptr1, _sum, 
1); vst1q_lane_s32(outptr2, _sum, 2); vst1q_lane_s32(outptr3, _sum, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; #endif outptr0++; outptr1++; outptr2++; outptr3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); int* outptr0 = out0; int i = 0; for (; i + 3 < size; i += 4) { signed char* tmpptr = bottom_tm.channel(i / 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1] int8x8_t _k = vld1_s8(kptr); // k0[0-1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); // k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } vst1q_s32(outptr0, _sum); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; 
sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += tmpptr[6] * kptr[0]; sum3 += tmpptr[7] * kptr[1]; tmpptr += 8; kptr += 2; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; #endif outptr0 += 4; } for (; i < size; i++) { signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // bottom_tm memory packed 4 x 4 ncnn::Mat bottom_tm(4, inch, size / 4 + size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_blob.channel(0); const signed char* img1 = bottom_blob.channel(1); img0 += i; img1 += i; signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img0[1]; tmpptr[3] = img1[1]; tmpptr[4] = img0[2]; tmpptr[5] = img1[2]; tmpptr[6] = img0[3]; tmpptr[7] = img1[3]; tmpptr += 8; img0 += bottom_blob.cstep; img0 += bottom_blob.cstep; img1 += bottom_blob.cstep; img1 += bottom_blob.cstep; } for (; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } #pragma omp parallel for 
num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; signed char* outptr0 = top_blob.channel(p); signed char* outptr1 = top_blob.channel(p + 1); signed char* outptr2 = top_blob.channel(p + 2); signed char* outptr3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? bias[p + 3] : 0.f; const float scale_requant_in0 = scales_requant[2 * p]; const float scale_requant_out0 = scales_requant[2 * p + 1]; const float scale_requant_in1 = scales_requant[2 * (p + 1)]; const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1]; const float scale_requant_in2 = scales_requant[2 * (p + 2)]; const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1]; const float scale_requant_in3 = scales_requant[2 * (p + 3)]; const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1]; float32x4_t _bias03, _scale_in03, _scale_out03; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _bias1 = vdupq_n_f32(bias1); float32x4_t _bias2 = vdupq_n_f32(bias2); float32x4_t _bias3 = vdupq_n_f32(bias3); _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; int i = 0; for (; i 
+ 3 < size; i += 4) { signed char* tmpptr = bottom_tm.channel(i / 4); const signed char* kptr = kernel.channel(p / 4); #if 1 //__ARM_NEON asm volatile( "prfm pldl1keep, [%4, #128] \n" "prfm pldl1keep, [%5, #128] \n" "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "lsr w4, %w12, #2 \n" // r4 = nn = L >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" // for (; k+3<L; k=k+4) "ld1 {v0.16b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.16b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #16 \n" "add %5, %5, #16 \n" "rev32 v1.8h, v0.8h \n" // i1, i0, i3, i2 "rev64 v2.4s, v0.4s \n" // i2, i3, i0, i1 "rev64 v3.8h, v0.8h \n" // i3, i2, i1, i0 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "prfm pldl1keep, [%4, #1024] \n" "prfm pldl1keep, [%5, #1024] \n" "smlal2 v8.8h, v4.16b, v0.16b \n" "smlal2 v9.8h, v4.16b, v1.16b \n" "smlal2 v10.8h, v4.16b, v2.16b \n" "smlal2 v11.8h, v4.16b, v3.16b \n" "sadalp v16.4s, v8.8h \n" // i0k0, i1k1, i2k2, i3k3 "sadalp v17.4s, v9.8h \n" // i1k0, i0k1, i3k2, i2k3 "sadalp v18.4s, v10.8h \n" // i2k0, i3k1, i0k2, i1k3 "sadalp v19.4s, v11.8h \n" // i3k0, i2k1, i1k2, i0k3 "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // for (; k+1<L; k=k+2) // remain loop "and w4, %w12, #3 \n" // w4 = remain = K & 3; "cmp w4, #0 \n" "beq 3f \n" "lsr w4, w4, #1 \n" // r4 = nn = L >> 1 "cmp w4, #0 \n" "beq 3f \n" "2: \n" // for (; k+1<L; k=k+2) "ld1 {v0.8b}, [%4] \n" // i0, i1, i2, i3 "ld1 {v4.8b}, [%5] \n" // k0, k1, k2, k3 "add %4, %4, #8 \n" "add %5, %5, #8 \n" "rev32 v1.4h, v0.4h \n" // i2, i3, i0, i1 "rev64 v2.2s, v0.2s \n" // i1, i0, i3, i2 "rev64 v3.4h, v0.4h \n" // i0, i1, i2, i3 "smull v8.8h, v4.8b, v0.8b \n" "smull v9.8h, v4.8b, v1.8b \n" "smull v10.8h, v4.8b, v2.8b \n" "smull v11.8h, v4.8b, v3.8b \n" "sadalp v16.4s, v8.8h \n" "sadalp v17.4s, v9.8h \n" "sadalp v18.4s,v10.8h \n" "sadalp 
v19.4s,v11.8h \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" // realloc "mov v20.s[0], v16.s[0] \n" "mov v20.s[1], v17.s[0] \n" "mov v20.s[2], v18.s[0] \n" "mov v20.s[3], v19.s[0] \n" "mov v21.s[0], v17.s[1] \n" "mov v21.s[1], v16.s[1] \n" "mov v21.s[2], v19.s[1] \n" "mov v21.s[3], v18.s[1] \n" "mov v22.s[0], v18.s[2] \n" "mov v22.s[1], v19.s[2] \n" "mov v22.s[2], v16.s[2] \n" "mov v22.s[3], v17.s[2] \n" "mov v23.s[0], v19.s[3] \n" "mov v23.s[1], v18.s[3] \n" "mov v23.s[2], v17.s[3] \n" "mov v23.s[3], v16.s[3] \n" "and w4, %w12, #1 \n" // w4 = remain = K & 1; "cmp w4, #0 \n" "beq 5f \n" "4: \n" "ld1 {v0.8b}, [%4] \n" "ld1 {v1.8b}, [%5] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // i0[0], i1[0], i2[0], i3[0] "sshll v1.8h, v1.8b, #0 \n" // k0[0], k1[0], k2[0], k3[0] "smlal v20.4s, v0.4h, v1.h[0] \n" // i0k0, i1k0, i2k0, i3k0 "smlal v21.4s, v0.4h, v1.h[1] \n" // i0k1, i1k1, i2k1, i3k1 "smlal v22.4s, v0.4h, v1.h[2] \n" // i0k2, i1k2, i2k2, i3k2 "smlal v23.4s, v0.4h, v1.h[3] \n" // i0k3, i1k3, i2k3, i3k3 "subs w4, w4, #1 \n" "bne 2b \n" "5: \n" // top_s32 -> top_f32 "scvtf v20.4s, v20.4s \n" "scvtf v21.4s, v21.4s \n" "scvtf v22.4s, v22.4s \n" "scvtf v23.4s, v23.4s \n" // top_f32 = top_f32 * scale_in "fmul v20.4s, v20.4s, %17.s[0] \n" "fmul v21.4s, v21.4s, %17.s[1] \n" "fmul v22.4s, v22.4s, %17.s[2] \n" "fmul v23.4s, v23.4s, %17.s[3] \n" // top_f32 = top_f32 + bias "fadd v20.4s, v20.4s, %13.4s \n" "fadd v21.4s, v21.4s, %14.4s \n" "fadd v22.4s, v22.4s, %15.4s \n" "fadd v23.4s, v23.4s, %16.4s \n" // top_f32 = top_f32 * scale_out "fmul v20.4s, v20.4s, %18.s[0] \n" "fmul v21.4s, v21.4s, %18.s[1] \n" "fmul v22.4s, v22.4s, %18.s[2] \n" "fmul v23.4s, v23.4s, %18.s[3] \n" // top_f32 -> top_s32 "fcvtas v20.4s, v20.4s \n" "fcvtas v21.4s, v21.4s \n" "fcvtas v22.4s, v22.4s \n" "fcvtas v23.4s, v23.4s \n" // top_s32 -> top_s16 "sqxtn v7.4h, v20.4s \n" "sqxtn2 v7.8h, v21.4s \n" "sqxtn v8.4h, v22.4s \n" "sqxtn2 v8.8h, v23.4s \n" // top_s16 -> top_s8 "sqxtn 
v0.8b, v7.8h \n" "sqxtn v1.8b, v8.8h \n" // save top_s8 "st1 {v0.s}[0], [%0] \n" "st1 {v0.s}[1], [%1] \n" "st1 {v1.s}[0], [%2] \n" "st1 {v1.s}[1], [%3] \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "w"(_bias0), // %13 "w"(_bias1), // %14 "w"(_bias2), // %15 "w"(_bias3), // %16 "w"(_scale_in03), // %17 "w"(_scale_out03) // %18 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0_0 += tmpptr[0] * kptr[0]; sum0_0 += tmpptr[1] * kptr[1]; sum0_1 += tmpptr[2] * kptr[0]; sum0_1 += tmpptr[3] * kptr[1]; sum0_2 += tmpptr[4] * kptr[0]; sum0_2 += tmpptr[5] * kptr[1]; sum0_3 += tmpptr[6] * kptr[0]; sum0_3 += tmpptr[7] * kptr[1]; sum1_0 += tmpptr[0] * kptr[2]; sum1_0 += tmpptr[1] * kptr[3]; sum1_1 += tmpptr[2] * kptr[2]; sum1_1 += tmpptr[3] * kptr[3]; sum1_2 += tmpptr[4] * kptr[2]; sum1_2 += tmpptr[5] * kptr[3]; sum1_3 += tmpptr[6] * kptr[2]; sum1_3 += tmpptr[7] * kptr[3]; sum2_0 += tmpptr[0] * kptr[4]; sum2_0 += tmpptr[1] * kptr[5]; sum2_1 += tmpptr[2] * kptr[4]; sum2_1 += tmpptr[3] * kptr[5]; sum2_2 += tmpptr[4] * kptr[4]; sum2_2 += tmpptr[5] * kptr[5]; sum2_3 += tmpptr[6] * kptr[4]; sum2_3 += tmpptr[7] * kptr[5]; sum3_0 += tmpptr[0] * kptr[6]; sum3_0 += tmpptr[1] * kptr[7]; sum3_1 += tmpptr[2] * kptr[6]; sum3_1 += tmpptr[3] * kptr[7]; sum3_2 += tmpptr[4] * kptr[6]; sum3_2 += tmpptr[5] * kptr[7]; sum3_3 += tmpptr[6] * kptr[6]; sum3_3 += tmpptr[7] * kptr[7]; tmpptr 
+= 8; kptr += 8; } for (; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * 
scale_requant_out3); #endif outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; } for (; i < size; i++) { signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 3 < inch; q = q + 4) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3] int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3] int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2] int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0] _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2] _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3] tmpptr += 4; kptr += 16; } for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1] _r0[2] = _r0[0]; _r0[3] = _r0[1]; _r0[4] = _r0[0]; _r0[5] = _r0[1]; _r0[6] = _r0[0]; _r0[7] = _r0[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 2; kptr += 8; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3] int8x8_t _k = vld1_s8(kptr); // k[0-3][0] int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vaddw_s16(_sum, vget_low_s16(_tp0)); tmpptr += 1; kptr += 4; } // top_s32 -> top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); // top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in03); // top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias03); // top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out03); // top_f32 -> top_s32 
_sum = vcvtaq_s32_f32(_sum_f32); // top_s32 -> top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); // top_s16 -> top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); // save top_s8 vst1_lane_s8(outptr0, _sum_s8, 0); vst1_lane_s8(outptr1, _sum_s8, 1); vst1_lane_s8(outptr2, _sum_s8, 2); vst1_lane_s8(outptr3, _sum_s8, 3); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[0] * kptr[2]; sum1 += tmpptr[1] * kptr[3]; sum2 += tmpptr[0] * kptr[4]; sum2 += tmpptr[1] * kptr[5]; sum3 += tmpptr[0] * kptr[6]; sum3 += tmpptr[1] * kptr[7]; tmpptr += 2; kptr += 8; } for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr += 1; kptr += 4; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); #endif outptr0++; outptr1++; outptr2++; outptr3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); signed char* outptr0 = out0; const float bias0 = bias ? 
bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); int i = 0; for (; i + 3 < size; i += 4) { signed char* tmpptr = bottom_tm.channel(i / 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); #if 1 //__ARM_NEON int32x4_t _sum = vdupq_n_s32(0); int q = 0; for (; q + 1 < inch; q = q + 2) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1] int8x8_t _k = vld1_s8(kptr); // k0[0-1] _k[2] = _k[0]; _k[3] = _k[1]; _k[4] = _k[0]; _k[5] = _k[1]; _k[6] = _k[0]; _k[7] = _k[1]; int16x8_t _tp0 = vmull_s8(_k, _r0); _sum = vpadalq_s16(_sum, _tp0); tmpptr += 8; kptr += 2; } for (; q < inch; q++) { int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0] int8x8_t _k = vld1_s8(kptr); // k[0][0] int16x8_t _r0_s16 = vmovl_s8(_r0); int16x8_t _k_s16 = vmovl_s8(_k); _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0 tmpptr += 4; kptr += 1; } // top_s32 -> top_f32 float32x4_t _sum_f32 = vcvtq_f32_s32(_sum); // top_f32 = top_f32 * scale_in _sum_f32 = vmulq_f32(_sum_f32, _scale_in); // top_f32 = top_f32 + bias _sum_f32 = vaddq_f32(_sum_f32, _bias0); // top_f32 = top_f32 * scale_out _sum_f32 = vmulq_f32(_sum_f32, _scale_out); // top_f32 -> top_s32 _sum = vcvtaq_s32_f32(_sum_f32); // top_s32 -> top_s16 int16x4_t _sum_s16 = vqmovn_s32(_sum); int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16); // top_s16 -> top_s8 int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp); // save top_s8 vst1_s8(outptr0, _sum_s8); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int q = 0; for (; q + 1 < inch; q = q + 2) { sum0 += tmpptr[0] * kptr[0]; sum0 += tmpptr[1] * kptr[1]; sum1 += tmpptr[2] * kptr[0]; sum1 += tmpptr[3] * kptr[1]; sum2 += tmpptr[4] * kptr[0]; sum2 += tmpptr[5] * kptr[1]; sum3 += 
tmpptr[6] * kptr[0]; // (continuation of previous line) scalar fallback: last pair of the 2-channel-unrolled accumulation
                sum3 += tmpptr[7] * kptr[1];

                tmpptr += 8;
                kptr += 2;
            }

            // leftover input channels (inch odd), processed one at a time
            for (; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];

                tmpptr += 4;
                kptr++;
            }

            // requantize: int32 accumulator -> float (in-scale) -> + bias -> out-scale -> int8
            outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out);
#endif
            outptr0 += 4;
        }

        // remaining spatial positions (size % 4), fully scalar path
        for (; i < size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);
            const signed char* kptr = kernel.channel(p / 4 + p % 4);

            int q = 0;
            int sum0 = 0;

            for (; q < inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                tmpptr++;
                kptr++;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);

            outptr0++;
        }
    }
}
#endif
#else

// Repack the flat 1x1 int8 kernel (outch x inch) for the sgemm path.
// Output channels are grouped by 4 (remainder channels get one slot each,
// hence channel index p/4 + p%4); within a 4-group the four per-channel
// weights for each input channel q are stored contiguously as k0,k1,k2,k3,
// matching the 4-wide accumulation order of the compute kernels below.
// _kernel  : source int8 weights, one inch-long row per output channel
// kernel_tm: destination, created here as (4*4) x (inch/4 + inch%4) x
//            (outch/4 + outch%4) with 1-byte elements
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)1u);

    int p = 0;
    // groups of 4 output channels: interleave weights of p..p+3 per input channel
    for (; p + 3 < outch; p += 4)
    {
        const signed char* kernel0 = kernel + (p + 0) * inch;
        const signed char* kernel1 = kernel + (p + 1) * inch;
        const signed char* kernel2 = kernel + (p + 2) * inch;
        const signed char* kernel3 = kernel + (p + 3) * inch;

        signed char* ktmp = kernel_tm.channel(p / 4);

        for (int q = 0; q < inch; q++)
        {
            // kernel0...3 0
            ktmp[0] = kernel0[0];
            ktmp[1] = kernel1[0];
            ktmp[2] = kernel2[0];
            ktmp[3] = kernel3[0];
            ktmp += 4;

            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
        }
    }
    // leftover output channels: weights copied through unchanged, one channel per slot
    for (; p < outch; p++)
    {
        const signed char* kernel0 = kernel + p * inch;

        signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

        for (int q = 0; q < inch; q++)
        {
            ktmp[0] = kernel0[0];
            ktmp++;
            kernel0++;
        }
    }
}

/*
 * Convolution 1x1 quantized with sgemm int8
 */
static void
conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; // interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - 
remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p + 1); int* outptr2 = top_blob.channel(p + 2); int* outptr3 = top_blob.channel(p + 3); int i = 0; for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 8); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d7}, [%4]! \n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n" // a30-a37 "vmovl.s8 q4, d6 \n" // a20-a27 "vmovl.s8 q3, d5 \n" // a10-a17 "vmovl.s8 q2, d4 \n" // a00-a07 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33 "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31 "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q8, d4, d0[1] \n" // sum1 = (a00-a07) * k10 "vmlal.s16 q9, d5, d0[1] \n" "vmlal.s16 q10, d4, d0[2] \n" // sum2 = (a00-a07) * k20 "vmlal.s16 q11, d5, d0[2] \n" "vmlal.s16 q12, d4, d0[3] \n" // sum3 = (a00-a07) * k30 "vmlal.s16 q13, d5, d0[3] \n" "vmlal.s16 q6, d6, d1[0] \n" // sum0 += (a10-a17) * k01 "vmlal.s16 q7, d7, d1[0] \n" "vmlal.s16 q8, d6, d1[1] \n" // sum1 += (a10-a17) * k11 "vmlal.s16 q9, d7, d1[1] \n" "vmlal.s16 q10, d6, d1[2] \n" // sum2 += (a10-a17) * k21 "vmlal.s16 q11, d7, d1[2] \n" "vmlal.s16 q12, d6, d1[3] \n" // sum3 += (a10-a17) * k31 "vmlal.s16 q13, d7, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20-a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20-a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20-a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20-a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30-a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30-a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30-a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30-a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%4]! 
\n" // tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00-a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00-a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00-a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d15}, [%0]! \n" "vst1.s32 {d16-d19}, [%1]! \n" "vst1.s32 {d20-d23}, [%2]! \n" "vst1.s32 {d24-d27}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += 
tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d5}, [%4]! \n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n" // a20-a23,a30-a33 "vmovl.s8 q2, d4 \n" // a00-a04,a10-a14 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33 "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31 "vmlal.s16 q6, d4, d0[0] \n" // sum0 = (a00-a03) * k00 "vmlal.s16 q7, d4, d0[1] \n" // sum1 = (a00-a03) * k10 "vmlal.s16 q8, d4, d0[2] \n" // sum2 = (a00-a03) * k20 "vmlal.s16 q9, d4, d0[3] \n" // sum3 = (a00-a03) * k30 "vmlal.s16 q6, d5, d1[0] \n" // sum0 += (a10-a13) * k01 "vmlal.s16 q7, d5, d1[1] \n" // sum1 += (a10-a13) * k11 "vmlal.s16 q8, d5, d1[2] \n" // sum2 += (a10-a13) * k21 "vmlal.s16 q9, d5, d1[3] \n" // sum3 += (a10-a13) * k31 "vmlal.s16 q6, d6, d2[0] \n" // sum0 += (a20-a23) * k02 "vmlal.s16 q7, d6, d2[1] \n" // sum1 += (a20-a23) * k12 "vmlal.s16 q8, d6, d2[2] \n" // sum2 += (a20-a23) * k22 "vmlal.s16 q9, d6, d2[3] \n" // sum3 += (a20-a23) * k32 "vmlal.s16 q6, d7, d3[0] \n" // sum0 += (a30-a33) * k03 "vmlal.s16 q7, d7, d3[1] \n" // sum1 += (a30-a33) * k13 "vmlal.s16 q8, d7, d3[2] \n" // sum2 += (a30-a33) * k23 "vmlal.s16 q9, d7, d3[3] \n" // sum3 += (a30-a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a03) * k00 "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00-a03) * k10 "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00-a03) * k20 "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00-a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" "vst1.s32 {d14-d15}, [%1]! \n" "vst1.s32 {d16-d17}, [%2]! \n" "vst1.s32 {d18-d19}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( // inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%4, #128] \n" 
"vld1.s8 {d4}, [%4] \n" // tmpr a00,a10,a20,a30 a(inch)(data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n" // a00,a10,a20,a30 "vld1.s8 {d0-d1}, [%5]! \n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33 "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31 "vmlal.s16 q6, d0, d4[0] \n" // (k00-k30) * a00 "vmlal.s16 q7, d1, d4[1] \n" // (k01-k31) * a10 "vmlal.s16 q8, d2, d4[2] \n" // (k02-k32) * a20 "vmlal.s16 q9, d3, d4[3] \n" // (k03-k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n" // end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch)(data) "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d20[0]}, [%0]! \n" "vst1.s32 {d20[1]}, [%1]! \n" "vst1.s32 {d21[0]}, [%2]! \n" "vst1.s32 {d21[1]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); int* outptr0 = out0; int i = 0; for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 8); const signed char* kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! 
\n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n" // a30-a37 "vmovl.s8 q4, d6 \n" // a20-a27 "vmovl.s8 q3, d5 \n" // a10-a17 "vmovl.s8 q2, d4 \n" // a00-a07 "vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n" // k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n" // (a10-a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n" // (a20-a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n" // (a30-a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n" // tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d15}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! 
\n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n" // a20-a23,a30-a33 "vmovl.s8 q2, d4 \n" // a00-a03,a10-a13 "vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n" // k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00-a03) * k00 "vmlal.s16 q6, d5, d0[1] \n" // (a10-a13) * k01 "vmlal.s16 q6, d6, d0[2] \n" // (a20-a23) * k02 "vmlal.s16 q6, d7, d0[3] \n" // (a30-a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n" // tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00-a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } // // NOTE sgemm int8 // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // int* outptr0 = out0; // // for (int i=0; i<size; i++) // { // int sum = 0; // // const signed char* kptr = _kernel.channel(p/8 + p%8); 
// // for (int q=0; q<inch; q++) // { // const signed char* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s1_sgemm_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { #if __ARM_NEON asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; signed char* outptr0 = top_blob.channel(p); signed char* outptr1 = top_blob.channel(p + 1); signed char* outptr2 = top_blob.channel(p + 2); signed char* outptr3 = top_blob.channel(p + 3); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? 
bias[p + 3] : 0.f; const float scale_requant_in0 = scales_requant[2 * p]; const float scale_requant_out0 = scales_requant[2 * p + 1]; const float scale_requant_in1 = scales_requant[2 * (p + 1)]; const float scale_requant_out1 = scales_requant[2 * (p + 1) + 1]; const float scale_requant_in2 = scales_requant[2 * (p + 2)]; const float scale_requant_out2 = scales_requant[2 * (p + 2) + 1]; const float scale_requant_in3 = scales_requant[2 * (p + 3)]; const float scale_requant_out3 = scales_requant[2 * (p + 3) + 1]; #if __ARM_NEON float32x4_t _bias03, _scale_in03, _scale_out03; _bias03[0] = bias0; _bias03[1] = bias1; _bias03[2] = bias2; _bias03[3] = bias3; _scale_in03[0] = scale_requant_in0; _scale_in03[1] = scale_requant_in1; _scale_in03[2] = scale_requant_in2; _scale_in03[3] = scale_requant_in3; _scale_out03[0] = scale_requant_out0; _scale_out03[1] = scale_requant_out1; _scale_out03[2] = scale_requant_out2; _scale_out03[3] = scale_requant_out3; #endif // __ARM_NEON int i = 0; for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 8); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d31}, [%4]! \n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d31 \n" // a30-a37 "vmovl.s8 q4, d30 \n" // a20-a27 "vmovl.s8 q15, d29 \n" // a10-a17 "vmovl.s8 q14, d28 \n" // a00-a07 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33 "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31 "vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00-a07) * k00 "vmlal.s16 q7, d29, d0[0] \n" "vmlal.s16 q8, d28, d0[1] \n" // sum1 = (a00-a07) * k10 "vmlal.s16 q9, d29, d0[1] \n" "vmlal.s16 q10, d28, d0[2] \n" // sum2 = (a00-a07) * k20 "vmlal.s16 q11, d29, d0[2] \n" "vmlal.s16 q12, d28, d0[3] \n" // sum3 = (a00-a07) * k30 "vmlal.s16 q13, d29, d0[3] \n" "vmlal.s16 q6, d30, d1[0] \n" // sum0 += (a10-a17) * k01 "vmlal.s16 q7, d31, d1[0] \n" "vmlal.s16 q8, d30, d1[1] \n" // sum1 += (a10-a17) * k11 "vmlal.s16 q9, d31, d1[1] \n" "vmlal.s16 q10, d30, d1[2] \n" // sum2 += (a10-a17) * k21 "vmlal.s16 q11, d31, d1[2] \n" "vmlal.s16 q12, d30, d1[3] \n" // sum3 += (a10-a17) * k31 "vmlal.s16 q13, d31, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n" // sum0 += (a20-a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n" // sum1 += (a20-a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n" // sum2 += (a20-a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n" // sum3 += (a20-a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n" // sum0 += (a30-a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n" // sum1 += (a30-a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n" // sum2 += (a30-a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n" // sum3 += (a30-a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%4]! 
\n" // tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n" // sum1 += (a00-a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n" // sum2 += (a00-a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n" // sum3 += (a00-a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0 // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[0] \n" "vmul.f32 q8, q8, %e17[1] \n" "vmul.f32 q9, q9, %e17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q14 \n" "vadd.f32 q8, q8, q15 \n" "vadd.f32 q9, q9, q15 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[0] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! 
\n" // sum1 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %e18[1] \n" "vmul.f32 q1, q9, %e18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.8 {d16}, [%1]! \n" // sum2 // top_s32 -> top_f32 "vcvt.f32.s32 q10, q10 \n" "vcvt.f32.s32 q11, q11 \n" "vcvt.f32.s32 q12, q12 \n" "vcvt.f32.s32 q13, q13 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %f17[0] \n" "vmul.f32 q11, q11, %f17[0] \n" "vmul.f32 q12, q12, %f17[1] \n" "vmul.f32 q13, q13, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, q4 \n" "vadd.f32 q11, q11, q4 \n" "vadd.f32 q12, q12, q5 \n" "vadd.f32 q13, q13, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %f18[0] \n" "vmul.f32 q1, q11, %f18[0] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d20, q0 \n" "vqmovn.s32 d21, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d20, q10 \n" // save top_s8 "vst1.8 {d20}, [%2]! \n" // sum3 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q12, %f18[1] \n" "vmul.f32 q1, q13, %f18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d24, q0 \n" "vqmovn.s32 d25, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d24, q12 \n" // save top_s8 "vst1.8 {d24}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "r"(bias0), // %13 "r"(bias1), // %14 "r"(bias2), // %15 "r"(bias3), // %16 "w"(_scale_in03), // %17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] 
* kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[4] = float2int8(((float)sum0_4 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[5] = float2int8(((float)sum0_5 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[6] = float2int8(((float)sum0_6 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[7] = float2int8(((float)sum0_7 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[4] = float2int8(((float)sum1_4 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[5] = float2int8(((float)sum1_5 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[6] = float2int8(((float)sum1_6 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[7] = float2int8(((float)sum1_7 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[4] = float2int8(((float)sum2_4 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[5] = float2int8(((float)sum2_5 * 
scale_requant_in2 + bias2) * scale_requant_out2); outptr2[6] = float2int8(((float)sum2_6 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[7] = float2int8(((float)sum2_7 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[4] = float2int8(((float)sum3_4 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[5] = float2int8(((float)sum3_5 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[6] = float2int8(((float)sum3_6 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[7] = float2int8(((float)sum3_7 * scale_requant_in3 + bias3) * scale_requant_out3); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d28-d29}, [%4]! \n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q15, d29 \n" // a20-a23,a30-a33 "vmovl.s8 q14, d28 \n" // a00-a04,a10-a14 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33 "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31 "vmlal.s16 q6, d28, d0[0] \n" // sum0 = (a00-a03) * k00 "vmlal.s16 q7, d28, d0[1] \n" // sum1 = (a00-a03) * k10 "vmlal.s16 q8, d28, d0[2] \n" // sum2 = (a00-a03) * k20 "vmlal.s16 q9, d28, d0[3] \n" // sum3 = (a00-a03) * k30 "vmlal.s16 q6, d29, d1[0] \n" // sum0 += (a10-a13) * k01 "vmlal.s16 q7, d29, d1[1] \n" // sum1 += (a10-a13) * k11 "vmlal.s16 q8, d29, d1[2] \n" // sum2 += (a10-a13) * k21 "vmlal.s16 q9, d29, d1[3] \n" // sum3 += (a10-a13) * k31 "vmlal.s16 q6, d30, d2[0] \n" // sum0 += (a20-a23) * k02 "vmlal.s16 q7, d30, d2[1] \n" // sum1 += (a20-a23) * k12 "vmlal.s16 q8, d30, d2[2] \n" // sum2 += (a20-a23) * k22 "vmlal.s16 q9, d30, d2[3] \n" // sum3 += (a20-a23) * k32 "vmlal.s16 q6, d31, d3[0] \n" // sum0 += (a30-a33) * k03 "vmlal.s16 q7, d31, d3[1] \n" // sum1 += (a30-a33) * k13 "vmlal.s16 q8, d31, d3[2] \n" // sum2 += (a30-a33) * k23 "vmlal.s16 q9, d31, d3[3] \n" // sum3 += (a30-a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n" // sum0 += (a00-a03) * k00 "vmlal.s16 q7, d2, d0[1] \n" // sum1 += (a00-a03) * k10 "vmlal.s16 q8, d2, d0[2] \n" // sum2 += (a00-a03) * k20 "vmlal.s16 q9, d2, d0[3] \n" // sum3 += (a00-a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory "vdup.f32 q14, %13 \n" // bias "vdup.f32 q15, %14 \n" // bias "vdup.f32 q4, %15 \n" // bias "vdup.f32 q5, %16 \n" // bias // sum0-1 // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" "vcvt.f32.s32 q8, q8 \n" "vcvt.f32.s32 q9, q9 \n" // top_f32 = top_f32 * scale_int 
"vmul.f32 q6, q6, %e17[0] \n" "vmul.f32 q7, q7, %e17[1] \n" "vmul.f32 q8, q8, %f17[0] \n" "vmul.f32 q9, q9, %f17[1] \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %e18[0] \n" "vmul.f32 q1, q7, %e18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.s32 {d12[0]}, [%0]! \n" "vst1.s32 {d12[1]}, [%1]! \n" // sum1-2 // top_f32 = top_f32 * scale_out "vmul.f32 q0, q8, %f18[0] \n" "vmul.f32 q1, q9, %f18[1] \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d16, q0 \n" "vqmovn.s32 d17, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d16, q8 \n" // save top_s8 "vst1.s32 {d16[0]}, [%2]! \n" "vst1.s32 {d16[1]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "r"(bias0), // %13 "r"(bias1), // %14 "r"(bias2), // %15 "r"(bias3), // %16 "w"(_scale_in03), // %17 "w"(_scale_out03) // %18 : "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q = 0; q < inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0); outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1); outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * 
scale_requant_out1); outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3); outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4); #if __ARM_NEON asm volatile( // inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n" // tmpr a00,a10,a20,a30 a(inch)(data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n" // a00,a10,a20,a30 "vld1.s8 {d0-d1}, [%5]! 
\n" // kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n" // k02-k32,k03-k33 "vmovl.s8 q0, d0 \n" // k00-k30,k01-k31 "vmlal.s16 q6, d0, d4[0] \n" // (k00-k30) * a00 "vmlal.s16 q7, d1, d4[1] \n" // (k01-k31) * a10 "vmlal.s16 q8, d2, d4[2] \n" // (k02-k32) * a20 "vmlal.s16 q9, d3, d4[3] \n" // (k03-k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n" // end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n" // tmpr a00 a(inch)(data) "vld1.s8 {d0}, [%5] \n" // kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32 -> top_f32 "vcvt.f32.s32 q10, q10 \n" // top_f32 = top_f32 * scale_int "vmul.f32 q10, q10, %q14 \n" // top_f32 = top_f32 + bias "vadd.f32 q10, q10, %q13 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q10, %q15 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12[0]}, [%0]! \n" "vst1.8 {d12[1]}, [%1]! \n" "vst1.8 {d12[2]}, [%2]! \n" "vst1.8 {d12[3]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch), // %12 "w"(_bias03), // %13 "w"(_scale_in03), // %14 "w"(_scale_out03) // %15 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); signed char* outptr0 = out0; const float bias0 = bias ? bias[p] : 0.f; const float scale_requant_in = scales_requant[2 * p]; const float scale_requant_out = scales_requant[2 * p + 1]; #if __ARM_NEON float32x4_t _bias0 = vdupq_n_f32(bias0); float32x4_t _scale_in = vdupq_n_f32(scale_requant_in); float32x4_t _scale_out = vdupq_n_f32(scale_requant_out); #endif // __ARM_NEON int i = 0; for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 8); const signed char* kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! 
\n" // tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n" // a30-a37 "vmovl.s8 q4, d6 \n" // a20-a27 "vmovl.s8 q3, d5 \n" // a10-a17 "vmovl.s8 q2, d4 \n" // a00-a07 "vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n" // k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n" // (a10-a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n" // (a20-a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n" // (a30-a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n" // tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" "vcvt.f32.s32 q7, q7 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" "vmul.f32 q7, q7, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" "vadd.f32 q7, q7, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" "vmul.f32 q1, q7, %q9 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" "vcvtr.s32.f32 s4, s4 \n" "vcvtr.s32.f32 s5, s5 \n" "vcvtr.s32.f32 s6, s6 \n" "vcvtr.s32.f32 s7, s7 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" "vqmovn.s32 d13, q1 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" // save top_s8 "vst1.8 {d12}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), // %6 "w"(_bias0), // %7 "w"(_scale_in), // %8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out); outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out); outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out); outptr0[4] = float2int8(((float)sum4 * scale_requant_in + bias0) * scale_requant_out); outptr0[5] = float2int8(((float)sum5 * scale_requant_in + bias0) * scale_requant_out); outptr0[6] = float2int8(((float)sum6 * scale_requant_in + bias0) * scale_requant_out); outptr0[7] = float2int8(((float)sum7 * scale_requant_in + bias0) * scale_requant_out); outptr0 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); #if __ARM_NEON asm volatile( // inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" // for(; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! 
\n" // tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n" // a20-a23,a30-a33 "vmovl.s8 q2, d4 \n" // a00-a03,a10-a13 "vld1.s8 {d0}, [%2] \n" // kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n" // k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n" // (a00-a03) * k00 "vmlal.s16 q6, d5, d0[1] \n" // (a10-a13) * k01 "vmlal.s16 q6, d6, d0[2] \n" // (a20-a23) * k02 "vmlal.s16 q6, d7, d0[3] \n" // (a30-a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n" // end for "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" // for(; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n" // tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%2] \n" // kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n" // (a00-a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n" // store the result to memory // top_s32 -> top_f32 "vcvt.f32.s32 q6, q6 \n" // top_f32 = top_f32 * scale_in "vmul.f32 q6, q6, %q8 \n" // top_f32 = top_f32 + bias "vadd.f32 q6, q6, %q7 \n" // top_f32 = top_f32 * scale_out "vmul.f32 q0, q6, %q9 \n" // top_f32 -> top_s32 "vcvtr.s32.f32 s0, s0 \n" "vcvtr.s32.f32 s1, s1 \n" "vcvtr.s32.f32 s2, s2 \n" "vcvtr.s32.f32 s3, s3 \n" // top_s32 -> top_s16 "vqmovn.s32 d12, q0 \n" // top_s16 -> top_s8 "vqmovn.s16 d12, q6 \n" "vst1.s32 {d12[0]}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch), // %6 "w"(_bias0), // %7 "w"(_scale_in), // %8 "w"(_scale_out) // %9 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q = 0; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out); outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out); outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out); outptr0 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const signed char* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const signed char* kptr = kernel.channel(p / 4 + p % 4); int q = 0; int sum0 = 0; for (; q < inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out); outptr0++; } } } #endif
omp_task_depend_resize_hashmap.c
// RUN: %libomp-compile && env KMP_ENABLE_TASK_THROTTLING=0 %libomp-run
// This test is known to be fragile on NetBSD kernel at the moment,
// https://bugs.llvm.org/show_bug.cgi?id=42020.
// UNSUPPORTED: netbsd
// Very flaky on openmp-clang-x86_64-linux-debian.
// https://bugs.llvm.org/show_bug.cgi?id=45397
// UNSUPPORTED: linux

// Regression test for resizing the runtime's task-dependency hashtable:
// enqueue tasks on NUM_DEPS distinct dependency addresses, which is well
// beyond the hashtable's initial static size, so the table must grow.
// Task throttling is disabled via the environment so every task is truly
// deferred and therefore recorded in the dependency hashmap.

#include <omp.h>
#include <stdlib.h>
#include <string.h>

// The first hashtable static size is 997
#define NUM_DEPS 4000

int main() {
  int *deps = calloc(NUM_DEPS, sizeof(int));
  int i;
  int failed = 0;

  // Treat allocation failure as a test failure instead of dereferencing NULL.
  if (deps == NULL)
    return 1;

#pragma omp parallel
#pragma omp master
  {
    // Two inout tasks per slot: the depend clauses force them to run in
    // order, and the implicit barrier at the end of the parallel region
    // guarantees both have completed, so each slot must end up holding 2.
    for (i = 0; i < NUM_DEPS; i++) {
      #pragma omp task firstprivate(i) depend(inout: deps[i])
      {
        deps[i] = 1;
      }
      #pragma omp task firstprivate(i) depend(inout: deps[i])
      {
        deps[i] = 2;
      }
    }
  }

  for (i = 0; i < NUM_DEPS; i++) {
    if (deps[i] != 2)
      failed++;
  }

  free(deps); // previously leaked
  return failed;
}
GB_unop__identity_int32_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_int32_int16)
// op(A') function: GB (_unop_tran__identity_int32_int16)

// C type:   int32_t
// A type:   int16_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

// type of the entries of the input matrix A
#define GB_ATYPE \
    int16_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// address of the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int32_int16)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // all entries present: apply the op to every position in Ax
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int32_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose template: expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
xgboost_data.h
#ifndef XGBOOST_DATA_H #define XGBOOST_DATA_H /*! * \file xgboost_data.h * \brief the input data structure for gradient boosting * \author Tianqi Chen: tianqi.tchen@gmail.com */ #include <vector> #include <climits> #include "../utils/xgboost_utils.h" #include "../utils/xgboost_stream.h" #include "../utils/xgboost_matrix_csr.h" namespace xgboost{ namespace booster{ /*! \brief interger type used in boost */ typedef int bst_int; /*! \brief unsigned interger type used in boost */ typedef unsigned bst_uint; /*! \brief float type used in boost */ typedef float bst_float; /*! \brief debug option for booster */ const bool bst_debug = false; }; }; namespace xgboost{ namespace booster{ /** * \brief This is a interface, defining the way to access features, * by column or by row. This interface is used to make implementation * of booster does not depend on how feature is stored. * * Why template instead of virtual class: for efficiency * feature matrix is going to be used by most inner loop of the algorithm * * \tparam Derived type of actual implementation * \sa FMatrixS: most of time FMatrixS is sufficient, refer to it if you find it confusing */ template<typename Derived> struct FMatrix{ public: /*! \brief exmaple iterator over one row */ struct RowIter{ /*! * \brief move to next position * \return whether there is element in next position */ inline bool Next(void); /*! \return feature index in current position */ inline bst_uint findex(void) const; /*! \return feature value in current position */ inline bst_float fvalue(void) const; }; /*! \brief example iterator over one column */ struct ColIter{ /*! * \brief move to next position * \return whether there is element in next position */ inline bool Next(void); /*! \return row index of current position */ inline bst_uint rindex(void) const; /*! \return feature value in current position */ inline bst_float fvalue(void) const; }; /*! \brief backward iterator over column */ struct ColBackIter : public ColIter {}; public: /*! 
* \brief get number of rows * \return number of rows */ inline size_t NumRow(void) const; /*! * \brief get number of columns * \return number of columns */ inline size_t NumCol(void) const; /*! * \brief get row iterator * \param ridx row index * \return row iterator */ inline RowIter GetRow(size_t ridx) const; /*! * \brief get number of column groups, this ise used together with GetRow( ridx, gid ) * \return number of column group */ inline unsigned NumColGroup(void) const{ return 1; } /*! * \brief get row iterator, return iterator of specific column group * \param ridx row index * \param gid colmun group id * \return row iterator, only iterates over features of specified column group */ inline RowIter GetRow(size_t ridx, unsigned gid) const; /*! \return whether column access is enabled */ inline bool HaveColAccess(void) const; /*! * \brief get column iterator, the columns must be sorted by feature value * \param ridx column index * \return column iterator */ inline ColIter GetSortedCol(size_t ridx) const; /*! * \brief get column backward iterator, starts from biggest fvalue, and iterator back * \param ridx column index * \return reverse column iterator */ inline ColBackIter GetReverseSortedCol(size_t ridx) const; }; }; }; namespace xgboost{ namespace booster{ /*! * \brief feature matrix to store training instance, in sparse CSR format */ class FMatrixS : public FMatrix<FMatrixS>{ public: /*! \brief one entry in a row */ struct REntry{ /*! \brief feature index */ bst_uint findex; /*! \brief feature value */ bst_float fvalue; /*! \brief constructor */ REntry(void){} /*! \brief constructor */ REntry(bst_uint findex, bst_float fvalue) : findex(findex), fvalue(fvalue){} inline static bool cmp_fvalue(const REntry &a, const REntry &b){ return a.fvalue < b.fvalue; } }; /*! \brief one row of sparse feature matrix */ struct Line{ /*! \brief array of feature index */ const REntry *data_; /*! \brief size of the data */ bst_uint len; /*! 
\brief get k-th element */ inline const REntry& operator[](unsigned i) const{ return data_[i]; } }; /*! \brief row iterator */ struct RowIter{ const REntry *dptr_, *end_; RowIter(const REntry* dptr, const REntry* end) :dptr_(dptr), end_(end){} inline bool Next(void){ if (dptr_ == end_) return false; else{ ++dptr_; return true; } } inline bst_uint findex(void) const{ return dptr_->findex; } inline bst_float fvalue(void) const{ return dptr_->fvalue; } }; /*! \brief column iterator */ struct ColIter : public RowIter{ ColIter(const REntry* dptr, const REntry* end) :RowIter(dptr, end){} inline bst_uint rindex(void) const{ return this->findex(); } }; /*! \brief reverse column iterator */ struct ColBackIter : public ColIter{ ColBackIter(const REntry* dptr, const REntry* end) :ColIter(dptr, end){} // shadows RowIter::Next inline bool Next(void){ if (dptr_ == end_) return false; else{ --dptr_; return true; } } }; public: /*! \brief constructor */ FMatrixS(void){ this->Clear(); } /*! \brief get number of rows */ inline size_t NumRow(void) const{ return row_ptr_.size() - 1; } /*! * \brief get number of nonzero entries * \return number of nonzero entries */ inline size_t NumEntry(void) const{ return row_data_.size(); } /*! \brief clear the storage */ inline void Clear(void){ row_ptr_.clear(); row_ptr_.push_back(0); row_data_.clear(); col_ptr_.clear(); col_data_.clear(); } /*! \brief get sparse part of current row */ inline Line operator[](size_t sidx) const{ Line sp; utils::Assert(!bst_debug || sidx < this->NumRow(), "row id exceed bound"); sp.len = static_cast<bst_uint>(row_ptr_[sidx + 1] - row_ptr_[sidx]); sp.data_ = &row_data_[row_ptr_[sidx]]; return sp; } /*! 
* \brief add a row to the matrix, with data stored in STL container * \param findex feature index * \param fvalue feature value * \param fstart start bound of feature * \param fend end bound range of feature * \return the row id added line */ inline size_t AddRow(const std::vector<bst_uint> &findex, const std::vector<bst_float> &fvalue, unsigned fstart = 0, unsigned fend = UINT_MAX){ utils::Assert(findex.size() == fvalue.size()); unsigned cnt = 0; for (size_t i = 0; i < findex.size(); i++){ if (findex[i] < fstart || findex[i] >= fend) continue; row_data_.push_back(REntry(findex[i], fvalue[i])); cnt++; } row_ptr_.push_back(row_ptr_.back() + cnt); return row_ptr_.size() - 2; } /*! \brief get row iterator*/ inline RowIter GetRow(size_t ridx) const{ utils::Assert(!bst_debug || ridx < this->NumRow(), "row id exceed bound"); return RowIter(&row_data_[row_ptr_[ridx]] - 1, &row_data_[row_ptr_[ridx + 1]] - 1); } /*! \brief get row iterator*/ inline RowIter GetRow(size_t ridx, unsigned gid) const{ utils::Assert(gid == 0, "FMatrixS only have 1 column group"); return FMatrixS::GetRow(ridx); } public: /*! \return whether column access is enabled */ inline bool HaveColAccess(void) const{ return col_ptr_.size() != 0 && col_data_.size() == row_data_.size(); } /*! \brief get number of colmuns */ inline size_t NumCol(void) const{ utils::Assert(this->HaveColAccess()); return col_ptr_.size() - 1; } /*! \brief get col iterator*/ inline ColIter GetSortedCol(size_t cidx) const{ utils::Assert(!bst_debug || cidx < this->NumCol(), "col id exceed bound"); return ColIter(&col_data_[col_ptr_[cidx]] - 1, &col_data_[col_ptr_[cidx + 1]] - 1); } /*! \brief get col iterator */ inline ColBackIter GetReverseSortedCol(size_t cidx) const{ utils::Assert(!bst_debug || cidx < this->NumCol(), "col id exceed bound"); return ColBackIter(&col_data_[col_ptr_[cidx + 1]], &col_data_[col_ptr_[cidx]]); } /*! 
* \brief intialize the data so that we have both column and row major * access, call this whenever we need column access */ inline void InitData(void){ utils::SparseCSRMBuilder<REntry> builder(col_ptr_, col_data_); builder.InitBudget(0); for (size_t i = 0; i < this->NumRow(); i++){ for (RowIter it = this->GetRow(i); it.Next();){ builder.AddBudget(it.findex()); } } builder.InitStorage(); for (size_t i = 0; i < this->NumRow(); i++){ for (RowIter it = this->GetRow(i); it.Next();){ builder.PushElem(it.findex(), REntry((bst_uint)i, it.fvalue())); } } // sort columns unsigned ncol = static_cast<unsigned>(this->NumCol()); #pragma omp parallel for schedule(static) for (unsigned i = 0; i < ncol; i++){ std::sort(&col_data_[col_ptr_[i]], &col_data_[col_ptr_[i + 1]], REntry::cmp_fvalue); } } /*! * \brief save data to binary stream * note: since we have size_t in ptr, * the function is not consistent between 64bit and 32bit machine * \param fo output stream */ inline void SaveBinary(utils::IStream &fo) const{ FMatrixS::SaveBinary(fo, row_ptr_, row_data_); int col_access = this->HaveColAccess() ? 1 : 0; fo.Write(&col_access, sizeof(int)); if (col_access != 0){ FMatrixS::SaveBinary(fo, col_ptr_, col_data_); } } /*! * \brief load data from binary stream * note: since we have size_t in ptr, * the function is not consistent between 64bit and 32bit machin * \param fi input stream */ inline void LoadBinary(utils::IStream &fi){ FMatrixS::LoadBinary(fi, row_ptr_, row_data_); int col_access; fi.Read(&col_access, sizeof(int)); if (col_access != 0){ FMatrixS::LoadBinary(fi, col_ptr_, col_data_); }else{ this->InitData(); } } /*! 
* \brief load from text file * \param fi input file pointer */ inline void LoadText(FILE *fi){ this->Clear(); int ninst; while (fscanf(fi, "%d", &ninst) == 1){ std::vector<booster::bst_uint> findex; std::vector<booster::bst_float> fvalue; while (ninst--){ unsigned index; float value; utils::Assert(fscanf(fi, "%u:%f", &index, &value) == 2, "load Text"); findex.push_back(index); fvalue.push_back(value); } this->AddRow(findex, fvalue); } // initialize column support as well this->InitData(); } private: /*! * \brief save data to binary stream * \param fo output stream * \param ptr pointer data * \param data data content */ inline static void SaveBinary(utils::IStream &fo, const std::vector<size_t> &ptr, const std::vector<REntry> &data){ size_t nrow = ptr.size() - 1; fo.Write(&nrow, sizeof(size_t)); fo.Write(&ptr[0], ptr.size() * sizeof(size_t)); if (data.size() != 0){ fo.Write(&data[0], data.size() * sizeof(REntry)); } } /*! * \brief load data from binary stream * \param fi input stream * \param ptr pointer data * \param data data content */ inline static void LoadBinary(utils::IStream &fi, std::vector<size_t> &ptr, std::vector<REntry> &data){ size_t nrow; utils::Assert(fi.Read(&nrow, sizeof(size_t)) != 0, "Load FMatrixS"); ptr.resize(nrow + 1); utils::Assert(fi.Read(&ptr[0], ptr.size() * sizeof(size_t)) != 0, "Load FMatrixS"); data.resize(ptr.back()); if (data.size() != 0){ utils::Assert(fi.Read(&data[0], data.size() * sizeof(REntry)) != 0, "Load FMatrixS"); } } public: /*! \brief row pointer of CSR sparse storage */ std::vector<size_t> row_ptr_; /*! \brief data in the row */ std::vector<REntry> row_data_; /*! \brief column pointer of CSC format */ std::vector<size_t> col_ptr_; /*! \brief column datas */ std::vector<REntry> col_data_; }; }; }; #endif