source
stringlengths
3
92
c
stringlengths
26
2.25M
ast-dump-openmp-distribute-parallel-for.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp distribute parallel for for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp distribute parallel for for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp distribute parallel for collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp distribute parallel for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp distribute parallel for collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-parallel-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:4:1, col:36> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: 
| | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:10:1, col:36> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | 
|-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:17:1, col:48> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: 
| | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPDistributeParallelForDirective {{.*}} <line:24:1, col:48> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: 
| | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPDistributeParallelForDirective {{.*}} <line:31:1, col:48> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:37, col:47> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:46> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:46> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} 
<col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-parallel-for.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
q2.c
#include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "common.h" const i32 REPEAT_COUNT = 512; const i32 ARRAY_SIZE = 8192; const i32 RECURSION_SIZE_LIMIT = 32; const i32 FILE_ARRAY_SIZE = 100000; i32* get_random_array(i32 size) { i32* arr = malloc(sizeof(i32) * size); for (i32 i = 0; i < size; i++) { arr[i] = rand() % 500; } return arr; } i32* get_array_from_file() { i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); FILE* file = fopen("input1.txt", "r"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { i32 elem; fscanf(file, "%d", &elem); arr[i] = elem; } fclose(file); return arr; } void output_array_to_file(i32* arr) { FILE* file = fopen("output1.txt", "w"); for (i32 i = 0; i < FILE_ARRAY_SIZE; i++) { fprintf(file, "%d\n", arr[i]); } fclose(file); } void insertion_sort(i32* arr, i32 l, i32 r) { for (i32 i = l + 1; i < r; i++) { i32 key = arr[i]; i32 j = i - 1; while (j >= l && arr[j] > key) { arr[j + 1] = arr[j]; j--; } arr[j + 1] = key; } } void merge(i32* arr, i32 l, i32 m, i32 r) { i32 n1 = m - l; i32 n2 = r - m; i32* L = malloc(sizeof(i32) * n1); i32* R = malloc(sizeof(i32) * n2); for (i32 i = 0; i < n1; i++) { L[i] = arr[l + i]; } for (i32 i = 0; i < n2; i++) { R[i] = arr[m + i]; } i32 i = 0, j = 0; i32 k = l; while (i < n1 && j < n2) { if (L[i] <= R[j]) { arr[k] = L[i]; i++; } else { arr[k] = R[j]; j++; } k++; } while (i < n1) { arr[k] = L[i]; i++; k++; } while (j < n2) { arr[k] = R[j]; j++; k++; } free(L); free(R); } void merge_sort(i32* arr, i32 l, i32 r) { if ((r - l) > RECURSION_SIZE_LIMIT) { i32 m = l + (r - l) / 2; #pragma omp task merge_sort(arr, l, m); #pragma omp task merge_sort(arr, m, r); #pragma omp taskwait merge(arr, l, m, r); } else if (r - l > 1) { insertion_sort(arr, l, r); } } f64 do_it(i32* arr) { f64 before = omp_get_wtime(); #pragma omp parallel #pragma omp single merge_sort(arr, 0, FILE_ARRAY_SIZE); f64 after = omp_get_wtime(); return after - before; } i32 main(i32 argc, char** argv) { omp_set_nested(1); if 
(argc > 1) { i32 thread_count = atoi(argv[1]); omp_set_num_threads(thread_count); } // i32* arr = get_random_array(ARRAY_SIZE); i32* arr = malloc(sizeof(i32) * FILE_ARRAY_SIZE); i32* orig_arr = get_array_from_file(); f64 time_sum = 0; for (i32 i = 0; i < REPEAT_COUNT; i++) { for (i32 k = 0; k < FILE_ARRAY_SIZE; k++) { arr[k] = orig_arr[k]; } time_sum += do_it(arr); } output_array_to_file(arr); printf("%fus\n", (time_sum * 1000000) / REPEAT_COUNT); return 0; }
fill_int2e.c
/* * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <math.h> #include "config.h" #include "cint.h" #define MAX(I,J) ((I) > (J) ? (I) : (J)) #define MIN(I,J) ((I) < (J) ? (I) : (J)) int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter) { int i; int i0 = shls_slice[0]; int i1 = shls_slice[1]; int di = 0; for (i = 1; i < ncenter; i++) { i0 = MIN(i0, shls_slice[i*2 ]); i1 = MAX(i1, shls_slice[i*2+1]); } for (i = i0; i < i1; i++) { di = MAX(di, ao_loc[i+1]-ao_loc[i]); } return di; } int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env) { int i, n; int i0 = shls_slice[0]; int i1 = shls_slice[1]; for (i = 1; i < ncenter; i++) { i0 = MIN(i0, shls_slice[i*2 ]); i1 = MAX(i1, shls_slice[i*2+1]); } int shls[4]; int cache_size = 0; for (i = i0; i < i1; i++) { shls[0] = i; shls[1] = i; shls[2] = i; shls[3] = i; n = (*intor)(NULL, NULL, shls, atm, natm, bas, nbas, env, NULL, NULL); cache_size = MAX(cache_size, n); } return cache_size; } /* ************************************************* * 2e AO integrals in s4, s2ij, s2kl, s1 */ void GTOnr2e_fill_s1(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int comp, int ishp, int jshp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { int ish0 = shls_slice[0]; int ish1 = shls_slice[1]; int jsh0 = shls_slice[2]; int jsh1 = shls_slice[3]; int ksh0 = shls_slice[4]; int ksh1 = shls_slice[5]; int lsh0 = shls_slice[6]; int lsh1 = shls_slice[7]; int ni = ao_loc[ish1] - ao_loc[ish0]; int nj = ao_loc[jsh1] - ao_loc[jsh0]; int nk = ao_loc[ksh1] - ao_loc[ksh0]; int nl = ao_loc[lsh1] - ao_loc[lsh0]; size_t nij = ni * nj; size_t nkl = nk * nl; size_t neri = nij * nkl; int ish = ishp + ish0; int jsh = jshp + jsh0; int i0 = ao_loc[ish] - ao_loc[ish0]; int j0 = ao_loc[jsh] - ao_loc[jsh0]; eri += nkl * (i0 * nj + j0); int di = ao_loc[ish+1] - ao_loc[ish]; int dj = 
ao_loc[jsh+1] - ao_loc[jsh]; int dij = di * dj; int k0, l0, dk, dl, dijk, dijkl; int i, j, k, l, icomp; int ksh, lsh; int shls[4]; double *eri0, *peri, *buf0, *pbuf, *cache; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { for (lsh = lsh0; lsh < lsh1; lsh++) { shls[2] = ksh; shls[3] = lsh; k0 = ao_loc[ksh] - ao_loc[ksh0]; l0 = ao_loc[lsh] - ao_loc[lsh0]; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; dijk = dij * dk; dijkl = dijk * dl; cache = buf + dijkl * comp; if ((*fprescreen)(shls, atm, bas, env) && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) { eri0 = eri + k0*nl+l0; buf0 = buf; for (icomp = 0; icomp < comp; icomp++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri = eri0 + nkl*(i*nj+j); for (k = 0; k < dk; k++) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l < dl; l++) { peri[k*nl+l] = pbuf[l*dijk]; } } } } buf0 += dijkl; eri0 += neri; } } else { eri0 = eri + k0*nl+l0; for (icomp = 0; icomp < comp; icomp++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri = eri0 + nkl*(i*nj+j); for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri[k*nl+l] = 0; } } } } eri0 += neri; } } } } } void GTOnr2e_fill_s2ij(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int comp, int ishp, int jshp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { if (ishp < jshp) { return; } int ish0 = shls_slice[0]; int ish1 = shls_slice[1]; int jsh0 = shls_slice[2]; //int jsh1 = shls_slice[3]; int ksh0 = shls_slice[4]; int ksh1 = shls_slice[5]; int lsh0 = shls_slice[6]; int lsh1 = shls_slice[7]; int ni = ao_loc[ish1] - ao_loc[ish0]; //int nj = ao_loc[jsh1] - ao_loc[jsh0]; int nk = ao_loc[ksh1] - ao_loc[ksh0]; int nl = ao_loc[lsh1] - ao_loc[lsh0]; size_t nij = ni * (ni+1) / 2; size_t nkl = nk * nl; size_t neri = nij * nkl; int ish = ishp + ish0; int jsh = jshp + jsh0; int i0 = ao_loc[ish] - ao_loc[ish0]; int j0 = ao_loc[jsh] - 
ao_loc[jsh0]; eri += nkl * (i0*(i0+1)/2 + j0); int di = ao_loc[ish+1] - ao_loc[ish]; int dj = ao_loc[jsh+1] - ao_loc[jsh]; int dij = di * dj; int k0, l0, dk, dl, dijk, dijkl; int i, j, k, l, icomp; int ksh, lsh; int shls[4]; double *eri0, *peri0, *peri, *buf0, *pbuf, *cache; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { for (lsh = lsh0; lsh < lsh1; lsh++) { shls[2] = ksh; shls[3] = lsh; k0 = ao_loc[ksh] - ao_loc[ksh0]; l0 = ao_loc[lsh] - ao_loc[lsh0]; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; dijk = dij * dk; dijkl = dijk * dl; cache = buf + dijkl * comp; if ((*fprescreen)(shls, atm, bas, env) && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) { eri0 = eri + k0*nl+l0; buf0 = buf; for (icomp = 0; icomp < comp; icomp++) { peri0 = eri0; if (ishp > jshp) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j < dj; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l < dl; l++) { peri[k*nl+l] = pbuf[l*dijk]; } } } } } else { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j <= i; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l < dl; l++) { peri[k*nl+l] = pbuf[l*dijk]; } } } } } buf0 += dijkl; eri0 += neri; } } else { eri0 = eri + k0*nl+l0; for (icomp = 0; icomp < comp; icomp++) { peri0 = eri0; if (ishp > jshp) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j < dj; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri[k*nl+l] = 0; } } } } } else { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j <= i; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++) { for (l = 0; l < dl; l++) { peri[k*nl+l] = 0; } } } } } eri0 += neri; } } } } } void GTOnr2e_fill_s2kl(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int comp, int ishp, int jshp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int 
nbas, double *env) { int ish0 = shls_slice[0]; int ish1 = shls_slice[1]; int jsh0 = shls_slice[2]; int jsh1 = shls_slice[3]; int ksh0 = shls_slice[4]; int ksh1 = shls_slice[5]; int lsh0 = shls_slice[6]; //int lsh1 = shls_slice[7]; int ni = ao_loc[ish1] - ao_loc[ish0]; int nj = ao_loc[jsh1] - ao_loc[jsh0]; int nk = ao_loc[ksh1] - ao_loc[ksh0]; //int nl = ao_loc[lsh1] - ao_loc[lsh0]; size_t nij = ni * nj; size_t nkl = nk * (nk+1) / 2; size_t neri = nij * nkl; int ish = ishp + ish0; int jsh = jshp + jsh0; int i0 = ao_loc[ish] - ao_loc[ish0]; int j0 = ao_loc[jsh] - ao_loc[jsh0]; eri += nkl * (i0 * nj + j0); int di = ao_loc[ish+1] - ao_loc[ish]; int dj = ao_loc[jsh+1] - ao_loc[jsh]; int dij = di * dj; int k0, l0, dk, dl, dijk, dijkl; int i, j, k, l, icomp; int ksh, lsh, kshp, lshp; int shls[4]; double *eri0, *peri, *buf0, *pbuf, *cache; shls[0] = ish; shls[1] = jsh; for (kshp = 0; kshp < ksh1-ksh0; kshp++) { for (lshp = 0; lshp <= kshp; lshp++) { ksh = kshp + ksh0; lsh = lshp + lsh0; shls[2] = ksh; shls[3] = lsh; k0 = ao_loc[ksh] - ao_loc[ksh0]; l0 = ao_loc[lsh] - ao_loc[lsh0]; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; dijk = dij * dk; dijkl = dijk * dl; cache = buf + dijkl * comp; if ((*fprescreen)(shls, atm, bas, env) && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) { eri0 = eri + k0*(k0+1)/2+l0; buf0 = buf; for (icomp = 0; icomp < comp; icomp++) { if (kshp > lshp) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri = eri0 + nkl*(i*nj+j); for (k = 0; k < dk; k++, peri+=k0+k) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l < dl; l++) { peri[l] = pbuf[l*dijk]; } } } } } else { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri = eri0 + nkl*(i*nj+j); for (k = 0; k < dk; k++, peri+=k0+k) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l <= k; l++) { peri[l] = pbuf[l*dijk]; } } } } } buf0 += dijkl; eri0 += neri; } } else { eri0 = eri + k0*(k0+1)/2+l0; for (icomp = 0; icomp < comp; icomp++) { if (kshp > lshp) 
{ for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri = eri0 + nkl*(i*nj+j); for (k = 0; k < dk; k++, peri+=k0+k) { for (l = 0; l < dl; l++) { peri[l] = 0; } } } } } else { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { peri = eri0 + nkl*(i*nj+j); for (k = 0; k < dk; k++, peri+=k0+k) { for (l = 0; l <= k; l++) { peri[l] = 0; } } } } } eri0 += neri; } } } } } void GTOnr2e_fill_s4(int (*intor)(), int (*fprescreen)(), double *eri, double *buf, int comp, int ishp, int jshp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { if (ishp < jshp) { return; } int ish0 = shls_slice[0]; int ish1 = shls_slice[1]; int jsh0 = shls_slice[2]; //int jsh1 = shls_slice[3]; int ksh0 = shls_slice[4]; int ksh1 = shls_slice[5]; int lsh0 = shls_slice[6]; //int lsh1 = shls_slice[7]; int ni = ao_loc[ish1] - ao_loc[ish0]; //int nj = ao_loc[jsh1] - ao_loc[jsh0]; int nk = ao_loc[ksh1] - ao_loc[ksh0]; //int nl = ao_loc[lsh1] - ao_loc[lsh0]; size_t nij = ni * (ni+1) / 2; size_t nkl = nk * (nk+1) / 2; size_t neri = nij * nkl; int ish = ishp + ish0; int jsh = jshp + jsh0; int i0 = ao_loc[ish] - ao_loc[ish0]; int j0 = ao_loc[jsh] - ao_loc[jsh0]; eri += nkl * (i0*(i0+1)/2 + j0); int di = ao_loc[ish+1] - ao_loc[ish]; int dj = ao_loc[jsh+1] - ao_loc[jsh]; int dij = di * dj; int k0, l0, dk, dl, dijk, dijkl; int i, j, k, l, icomp; int ksh, lsh, kshp, lshp; int shls[4]; double *eri0, *peri0, *peri, *buf0, *pbuf, *cache; shls[0] = ish; shls[1] = jsh; for (kshp = 0; kshp < ksh1-ksh0; kshp++) { for (lshp = 0; lshp <= kshp; lshp++) { ksh = kshp + ksh0; lsh = lshp + lsh0; shls[2] = ksh; shls[3] = lsh; k0 = ao_loc[ksh] - ao_loc[ksh0]; l0 = ao_loc[lsh] - ao_loc[lsh0]; dk = ao_loc[ksh+1] - ao_loc[ksh]; dl = ao_loc[lsh+1] - ao_loc[lsh]; dijk = dij * dk; dijkl = dijk * dl; cache = buf + dijkl * comp; if ((*fprescreen)(shls, atm, bas, env) && (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) { eri0 = eri + k0*(k0+1)/2+l0; buf0 = 
buf; for (icomp = 0; icomp < comp; icomp++) { peri0 = eri0; if (kshp > lshp && ishp > jshp) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j < dj; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l < dl; l++) { peri[l] = pbuf[l*dijk]; } } } } } else if (ish > jsh) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j < dj; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l <= k; l++) { peri[l] = pbuf[l*dijk]; } } } } } else if (ksh > lsh) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j <= i; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l < dl; l++) { peri[l] = pbuf[l*dijk]; } } } } } else { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j <= i; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (pbuf = buf0 + k*dij + j*di + i, l = 0; l <= k; l++) { peri[l] = pbuf[l*dijk]; } } } } } buf0 += dijkl; eri0 += neri; } } else { eri0 = eri + k0*(k0+1)/2+l0; buf0 = buf; for (icomp = 0; icomp < comp; icomp++) { peri0 = eri0; if (kshp > lshp && ishp > jshp) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j < dj; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (l = 0; l < dl; l++) { peri[l] = 0; } } } } } else if (ish > jsh) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j < dj; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (l = 0; l <= k; l++) { peri[l] = 0; } } } } } else if (ksh > lsh) { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j <= i; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (l = 0; l < dl; l++) { peri[l] = 0; } } } } } else { for (i = 0; i < di; i++, peri0+=nkl*(i0+i)) { for (j = 0; j <= i; j++) { peri = peri0 + nkl*j; for (k = 0; k < dk; k++, peri+=k0+k) { for (l = 0; l <= k; l++) { peri[l] = 0; 
} } } } } eri0 += neri; } } } } } static int no_prescreen() { return 1; } void GTOnr2e_fill_drv(int (*intor)(), void (*fill)(), int (*fprescreen)(), double *eri, int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { if (fprescreen == NULL) { fprescreen = no_prescreen; } const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4); const int cache_size = GTOmax_cache_size(intor, shls_slice, 4, atm, natm, bas, nbas, env); #pragma omp parallel default(none) \ shared(fill, fprescreen, eri, intor, comp, \ shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env) { int ij, i, j; double *buf = malloc(sizeof(double) * (di*di*di*di*comp + cache_size)); #pragma omp for nowait schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij / njsh; j = ij % njsh; (*fill)(intor, fprescreen, eri, buf, comp, i, j, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
loss_mat.h
#ifndef LOSS_MAT_H
#define LOSS_MAT_H

#include "../loss.h"

// Base class for losses whose optimization variable is a matrix (one column
// per class/output).  M is the data-matrix type, L the label type; the third
// template argument of Loss fixes the variable type to Matrix<T>.
template <typename M, typename L>
class LinearLossMat : public Loss< M, L, Matrix<typename M::value_type> > {
public:
   typedef Loss< M, L, Matrix<typename M::value_type> > loss_type;
   typedef typename M::value_type T;

   LinearLossMat(DataMatrixLinear<M>& data, const L& y) : loss_type(data,y), _data(data) { };

   // output += a * grad_i(input): per-class scalar gradients for example i
   // come from scal_grad and are expanded by the data object.
   virtual void add_grad(const Matrix<T>& input, const INTM i, Matrix<T>& output, const T a = T(1.0)) const {
      Vector<T> sgrad;
      scal_grad(input,i,sgrad);
      _data.add_dual_pred(i,sgrad,output,a);
   };
   // output += eta1*grad_i(input1) + eta2*grad_i(input2), fused so example
   // i's data is applied once (pattern used by variance-reduced solvers).
   inline void double_add_grad(const Matrix<T>& input1, const Matrix<T>& input2, const INTM i, Matrix<T>& output, const T eta1 = T(1.0), const T eta2 = -T(1.0), const T dummy = T(1.0)) const {
      Vector<T> sgrad2;
      Vector<T> sgrad1;
      scal_grad(input1,i,sgrad1);
      scal_grad(input2,i,sgrad2);
      sgrad1.add_scal(sgrad2,eta2,eta1);
      _data.add_dual_pred(i,sgrad1,output);
   };
   virtual bool transpose() const {
      return true;
   };
   virtual void add_feature(const Matrix<T>& input, Matrix<T>& output, const T s) const {
      _data.add_dual_pred(input,output,s,T(1.0));
   }
   virtual void add_feature(Matrix<T>& output, const INTM i, const Vector<T>& s) const {
      _data.add_dual_pred(i,s,output,T(1.0),T(1.0));
   };
   // Per-class scalar gradients for example i; provided by subclasses.
   virtual void scal_grad(const Matrix<T>& input, const INTM i, Vector<T>& output) const = 0;
   DataMatrixLinear<M>& data() const { return _data;};

protected:
   DataMatrixLinear<M>& _data;
};

// Wraps N independent scalar losses (one per row of the label matrix y) and
// presents them as a single matrix loss: column ii of the variable is paired
// with loss _losses[ii].  Owns the per-column losses and data views.
template <typename loss_type>
class LossMat : public LinearLossMat<typename loss_type::data_type, Matrix<typename loss_type::value_type> > {
public:
   typedef typename loss_type::value_type T;
   typedef typename loss_type::data_type M;
   typedef typename loss_type::label_type L;
   typedef LinearLossMat<M, Matrix<T> > base_loss;

   // _N = y.m() sub-problems; labels are transposed into _yT so each
   // sub-loss can reference its labels as a contiguous column.
   LossMat(DataMatrixLinear<M>& data, const Matrix<T>& y) : base_loss(data,y), _N(y.m()) {
      _losses=new loss_type*[_N];
      _datas=new DataLinear<M>*[_N];
      _n = y.n();
      y.transpose(_yT);
      Vector<T> ycol;
      for (int i = 0; i<_N; ++i) {
         _datas[i]=data.toDataLinear();
         _yT.refCol(i,ycol);
         _losses[i]=new loss_type(*(_datas[i]),ycol);
      }
      this->_id=_losses[0]->id();
   };
   virtual ~LossMat() {
      for (int i = 0; i<_N; ++i) {
         delete(_losses[i]);
         delete(_datas[i]);
         _losses[i]=NULL;
         _datas[i]=NULL;
      }
      delete[](_losses);
      delete[](_datas);
   };

   // Full-batch objective: sum of the sub-losses over the columns of input.
   // refCol presumably yields a lightweight view -- the parallel loops below
   // rely on that (TODO confirm against Matrix implementation).
   inline T eval(const Matrix<T>& input) const {
      T sum=0;
#pragma omp parallel for reduction(+ : sum)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col;
         input.refCol(ii,col);
         sum += _losses[ii]->eval(col);
      }
      return sum;
   };
   // Objective restricted to example i (stochastic setting).
   inline T eval(const Matrix<T>& input, const INTM i) const {
      T sum=0;
      //#pragma omp parallel for reduction(+ : sum) num_threads(2)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col;
         input.refCol(ii,col);
         sum += _losses[ii]->eval(col,i);
      }
      return sum;
   };
   // Column-wise delegation of the stochastic gradient update.
   inline void add_grad(const Matrix<T>& input, const INTM i, Matrix<T>& output, const T eta = T(1.0)) const {
      output.resize(input.m(),input.n());
      //#pragma omp parallel for num_threads(2)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col_input, col_output;
         input.refCol(ii,col_input);
         output.refCol(ii,col_output);
         _losses[ii]->add_grad(col_input,i,col_output,eta);
      }
   };
   inline void double_add_grad(const Matrix<T>& input1, const Matrix<T>& input2, const INTM i, Matrix<T>& output, const T eta1 = T(1.0), const T eta2 = -T(1.0), const T dummy =T(1.0)) const {
      output.resize(input1.m(),input1.n());
      //#pragma omp parallel for num_threads(2)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col_input1, col_input2, col_output;
         input1.refCol(ii,col_input1);
         input2.refCol(ii,col_input2);
         output.refCol(ii,col_output);
         _losses[ii]->double_add_grad(col_input1,col_input2,i,col_output,eta1,eta2,dummy);
      }
   };
   // Full gradient, column by column.
   inline void grad(const Matrix<T>& input, Matrix<T>& output) const {
      output.resize(input.m(),input.n());
#pragma omp parallel for
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col_input, col_output;
         input.refCol(ii,col_input);
         output.refCol(ii,col_output);
         _losses[ii]->grad(col_input,col_output);
      }
   }
   inline void print() const {
      logging(logINFO) << "Loss for matrices";
      _losses[0]->print();
   };
   // All sub-losses share a type, so the first one answers for all of them.
   inline bool provides_fenchel() const {
      return _losses[0]->provides_fenchel();
   };
   inline void get_dual_variable(const Matrix<T>& input, Matrix<T>& grad1, Matrix<T>& grad2) const {
      grad1.resize(_n,input.n());
      grad2.resize(input.m(),input.n());
#pragma omp parallel for
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col1, col2, col3;
         input.refCol(ii,col1);
         grad1.refCol(ii,col2);
         grad2.refCol(ii,col3);
         _losses[ii]->get_dual_variable(col1,col2,col3);
      }
   };
   // Fenchel conjugate value; note copyCol (a copy, not a view) is used here.
   inline T fenchel(const Matrix<T>& input) const {
      T sum=0;
#pragma omp parallel for reduction(+ : sum)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col;
         input.copyCol(ii,col);
         sum+=_losses[ii]->fenchel(col);
      }
      return sum;
   };
   inline T lipschitz() const {
      return _losses[0]->lipschitz();
   }
   inline void lipschitz(Vector<T>& Li) const {
      _losses[0]->lipschitz(Li);
   };
   // input; nclass x n
   // output: p x nclass
   virtual void add_feature(const Matrix<T>& input, Matrix<T>& output, const T s) const {
#pragma omp parallel for
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col1, col2;
         input.copyRow(ii,col1);
         output.refCol(ii,col2);
         _losses[ii]->add_feature(col1,col2,s);
      }
   }
   virtual void add_feature(Matrix<T>& output, const INTM i, const Vector<T>& s) const {
      //#pragma omp parallel for num_threads(2)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col;
         output.refCol(ii,col);
         _losses[ii]->add_feature(col,i,s[ii]);
      }
   };
   // Gathers the per-column scalar gradients of example i into output[_N].
   virtual void scal_grad(const Matrix<T>& input, const INTM i, Vector<T>& output) const {
      output.resize(_N);
      //#pragma omp parallel for num_threads(2)
      for (int ii=0; ii<_N; ++ii) {
         Vector<T> col;
         input.refCol(ii,col);
         _losses[ii]->scal_grad(col,i,output[ii]);
      }
   };
   virtual bool transpose() const {
      return false;
   };
   // The following hooks are not meaningful for the composite loss.
   virtual void get_grad_aux(const Matrix<T>& input, Matrix<T>& grad1) const {
      logging(logERROR) << "Not used";
   };
   virtual T lipschitz_constant() const {
      logging(logERROR) << "Not used";
      return 0;
   };
   virtual void get_dual_constraints(Matrix<T>& grad1) const {
      logging(logERROR) << "Not used";
   };

protected:
   int _N;                  // number of sub-problems (rows of y)
   int _n;                  // number of examples (columns of y)
   loss_type** _losses;     // owned, one loss per sub-problem
   DataLinear<M>** _datas;  // owned, per-sub-problem data views
   Matrix<T> _yT;           // transposed labels; sub-losses keep references
};

#endif
BaseDetector.h
#pragma once

#include <memory>
#include "defines.h"

///
/// \brief The BaseDetector class
/// Abstract base for all object detectors: detection entry points,
/// detection storage (m_regions) and visualization helpers.
///
class BaseDetector
{
public:
    ///
    /// \brief BaseDetector
    /// \param frame  Sample frame used only to derive a default minimal
    ///               object size (1% of the frame width, at least 5 px).
    ///
    BaseDetector(const cv::UMat& frame)
    {
        m_minObjectSize.width = std::max(5, frame.cols / 100);
        m_minObjectSize.height = m_minObjectSize.width;
    }
    ///
    /// \brief ~BaseDetector
    ///
    virtual ~BaseDetector(void) = default;

    ///
    /// \brief Init
    /// \param config  Detector-specific key/value configuration.
    ///
    virtual bool Init(const config_t& config) = 0;

    ///
    /// \brief Detect
    /// \param frame  Runs detection; results are retrieved via GetDetects().
    ///
    virtual void Detect(const cv::UMat& frame) = 0;

    ///
    /// \brief Detect
    /// Sequential batch detection; copies each frame's detections into the
    /// matching slot of regions.
    /// NOTE(review): assumes regions.size() >= frames.size() -- confirm at
    /// call sites, there is no resize here.
    /// \param frames
    /// \param regions
    ///
    virtual void Detect(const std::vector<cv::UMat>& frames, std::vector<regions_t>& regions)
    {
        for (size_t i = 0; i < frames.size(); ++i)
        {
            Detect(frames[i]);
            auto res = GetDetects();
            regions[i].assign(std::begin(res), std::end(res));
        }
    }

    ///
    /// \brief ResetModel
    /// Optional hook for background-model detectors; no-op by default.
    /// \param img
    /// \param roiRect
    ///
    virtual void ResetModel(const cv::UMat& /*img*/, const cv::Rect& /*roiRect*/)
    {
    }

    ///
    /// \brief CanGrayProcessing
    /// True if the detector can work on single-channel (grayscale) input.
    ///
    virtual bool CanGrayProcessing() const = 0;

    ///
    /// \brief SetMinObjectSize
    /// \param minObjectSize
    ///
    void SetMinObjectSize(cv::Size minObjectSize)
    {
        m_minObjectSize = minObjectSize;
    }

    ///
    /// \brief GetDetects
    /// \return Detections produced by the last Detect() call.
    ///
    const regions_t& GetDetects() const
    {
        return m_regions;
    }

    ///
    /// \brief CalcMotionMap
    /// Accumulates current detections into a decaying heat map and blends it
    /// into the (non-const) visualization frame.
    /// \param frame
    ///
    virtual void CalcMotionMap(cv::Mat& frame)
    {
        // Lazily (re)allocate the accumulator when the frame size changes.
        if (m_motionMap.size() != frame.size())
            m_motionMap = cv::Mat(frame.size(), CV_32FC1, cv::Scalar(0, 0, 0));

        // Rasterize each detection ellipse into a binary foreground mask.
        cv::Mat foreground(m_motionMap.size(), CV_8UC1, cv::Scalar(0, 0, 0));
        for (const auto& region : m_regions)
        {
#if (CV_VERSION_MAJOR < 4)
            cv::ellipse(foreground, region.m_rrect, cv::Scalar(255, 255, 255), CV_FILLED);
#else
            cv::ellipse(foreground, region.m_rrect, cv::Scalar(255, 255, 255), cv::FILLED);
#endif
        }

        cv::normalize(foreground, m_normFor, 255, 0, cv::NORM_MINMAX, m_motionMap.type());

        // Exponential decay: keep 95% of the history, add 5% of the new mask.
        double alpha = 0.95;
        cv::addWeighted(m_motionMap, alpha, m_normFor, 1 - alpha, 0, m_motionMap);

        const int chans = frame.channels();
        const int height = frame.rows;
#pragma omp parallel for
        for (int y = 0; y < height; ++y)
        {
            uchar* imgPtr = frame.ptr(y);
            const float* moPtr = reinterpret_cast<float*>(m_motionMap.ptr(y));
            for (int x = 0; x < frame.cols; ++x)
            {
                // ci starts at chans - 1: only the LAST channel receives the
                // overlay (red tint for BGR frames).
                for (int ci = chans - 1; ci < chans; ++ci)
                {
                    imgPtr[ci] = cv::saturate_cast<uchar>(imgPtr[ci] + moPtr[0]);
                }
                imgPtr += chans;
                ++moPtr;
            }
        }
    }

protected:
    regions_t m_regions;           // detections from the last Detect() call

    cv::Size m_minObjectSize;      // detections smaller than this are ignored

    // Motion map for visualization current detections
    cv::Mat m_motionMap;
    cv::Mat m_normFor;

    std::set<objtype_t> m_classesWhiteList;  // empty = accept all classes

    ///
    /// \brief GetCrops
    /// Tiles imgSize with crops of the network's aspect ratio, scaled by
    /// maxCropRatio; neighboring crops overlap by 25% (step = 3/4 crop size).
    ///
    std::vector<cv::Rect> GetCrops(float maxCropRatio, cv::Size netSize, cv::Size imgSize) const
    {
        std::vector<cv::Rect> crops;

        const float whRatio = static_cast<float>(netSize.width) / static_cast<float>(netSize.height);
        int cropHeight = cvRound(maxCropRatio * netSize.height);
        int cropWidth = cvRound(maxCropRatio * netSize.width);

        // Clamp the crop to the image while preserving the net aspect ratio.
        if (imgSize.width / (float)imgSize.height > whRatio)
        {
            if (cropHeight >= imgSize.height)
                cropHeight = imgSize.height;
            cropWidth = cvRound(cropHeight * whRatio);
        }
        else
        {
            if (cropWidth >= imgSize.width)
                cropWidth = imgSize.width;
            cropHeight = cvRound(cropWidth / whRatio);
        }

        //std::cout << "Frame size " << imgSize << ", crop size = " << cv::Size(cropWidth, cropHeight) << ", ratio = " << maxCropRatio << std::endl;

        const int stepX = 3 * cropWidth / 4;
        const int stepY = 3 * cropHeight / 4;
        for (int y = 0; y < imgSize.height; y += stepY)
        {
            // Snap the last row/column of crops to the image border.
            bool needBreakY = false;
            if (y + cropHeight >= imgSize.height)
            {
                y = imgSize.height - cropHeight;
                needBreakY = true;
            }
            for (int x = 0; x < imgSize.width; x += stepX)
            {
                bool needBreakX = false;
                if (x + cropWidth >= imgSize.width)
                {
                    x = imgSize.width - cropWidth;
                    needBreakX = true;
                }
                crops.emplace_back(x, y, cropWidth, cropHeight);
                if (needBreakX)
                    break;
            }
            if (needBreakY)
                break;
        }
        return crops;
    }

    /// Maps class-name strings to objtype_t; returns false if any name is
    /// unknown (those slots stay bad_type).
    bool FillTypesMap(const std::vector<std::string>& classNames)
    {
        bool res = true;
        m_typesMap.resize(classNames.size(), bad_type);
        for (size_t i = 0; i < classNames.size(); ++i)
        {
            objtype_t type = TypeConverter::Str2Type(classNames[i]);
            m_typesMap[i] = type;
            res &= (type != bad_type);
        }
        return res;
    }
    /// Class index -> object type; bad_type for out-of-range indices.
    objtype_t T2T(size_t typeInd) const
    {
        if (typeInd < m_typesMap.size())
            return m_typesMap[typeInd];
        else
            return bad_type;
    }

private:
    std::vector<objtype_t> m_typesMap;
};

///
/// \brief CreateDetector
/// \param detectorType
/// \param gray
/// \return
///
std::unique_ptr<BaseDetector> CreateDetector(tracking::Detectors detectorType, const config_t& config, cv::UMat& gray);
GB_binop__lxor_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__lxor_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_fp32) // A*D function (colscale): GB (_AxD__lxor_fp32) // D*A function (rowscale): GB (_DxB__lxor_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_fp32) // C=scalar+B GB (_bind1st__lxor_fp32) // C=scalar+B' GB (_bind1st_tran__lxor_fp32) // C=A+scalar GB (_bind2nd__lxor_fp32) // C=A'+scalar GB (_bind2nd_tran__lxor_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_FP32 || GxB_NO_LXOR_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// NOTE: this file is auto-generated (see the header banner); any change must
// be made in the Generator/ sources, not here.  The function bodies are
// macro-driven #include templates; cij = ((aij != 0) != (bij != 0)) (LXOR).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GB_DISABLE is set when this operator/type pair is compiled out; the
    // caller then falls back to the generic kernel.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here, used and freed by the template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for LXOR (commutative), so only the #else branch
    // below is compiled for this kernel.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB tests the bitmap; entries not present are skipped
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tinyexr.h
/* Copyright (c) 2014 - 2018, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)
#define TINYEXR_ERROR_CANT_WRITE_FILE (-10)
#define TINYEXR_ERROR_SERIALZATION_FAILED (-11)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_HEADER_ATTRIBUTES (1024)
#define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS
(1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

// Decoded EXR version field (the 4 bytes after the magic number).
typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

// One named header attribute; `value` is an owned byte buffer of `size` bytes.
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

// Per-channel description from the `channels` attribute.
typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;  // TINYEXR_PIXELTYPE_*
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

// One decoded tile; border tiles may be smaller than the nominal tile size.
typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

// Parsed EXR header; fill with ParseEXRHeaderFrom* and release with
// FreeEXRHeader.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute *custom_attributes;  // array of EXRAttribute. size =
                                    // `num_custom_attributes`.

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

// Decoded image data; exactly one of `tiles` or `images` is non-NULL.
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

// Deep-image (EXR 2.0) data: variable sample counts per pixel.
typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
// Use ZIP compression by default.
// Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Free's internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Free's internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Free's error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <sstream> #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #endif // __cplusplus > 199711L #ifdef _OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. 
Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. Rich Geldreich <richgel99@gmail.com>, last updated Oct. 
13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. 
- Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). 
It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. 
Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. 
Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 
32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/

#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able
// to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. 
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. 
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). 
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash with an empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size, mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. 
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. 
// tdefl's complete compression state. The tdefl API's never allocate memory;
// everything (dictionary, Huffman tables, output buffer) lives in this one
// object, which the caller provides. Note it is large -- see the fixed-size
// arrays below.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func; // optional output callback (may be NULL)
  void *m_pPut_buf_user;                  // opaque pointer handed to the callback
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, compressed output will be supplied to this
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when the tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)

#ifndef MINIZ_HEADER_FILE_ONLY

// Compile-time size checks: each typedef declares an array whose size becomes
// -1 (a hard compile error) if the corresponding fixed-width typedef is wrong.
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

//#include <assert.h>
//#include <string.h>

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
// With MINIZ_NO_MALLOC all heap usage is compiled out: allocations fail
// (return NULL) and frees are no-ops.
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

// Little-endian 16/32-bit loads: a direct (possibly unaligned) load when the
// target permits it, otherwise assembled byte-by-byte, which is safe on any
// alignment and endianness.
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p)                                                        \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |                                   \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p)                                                        \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |                                   \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) |                           \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) |                          \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

// Computes the Adler-32 checksum of ptr[0..buf_len-1], continuing from the
// running checksum 'adler'. A NULL ptr returns the initial seed
// (MZ_ADLER32_INIT), matching zlib's adler32() convention.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  // 5552 is zlib's NMAX: the largest block length whose running sums cannot
  // overflow 32 bits before the modulo reduction below.
  size_t block_len = buf_len % 5552;
  if (!ptr)
    return MZ_ADLER32_INIT;
  while (buf_len) {
    // Unrolled-by-8 inner loop; the loop after it handles the remainder.
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i)
      s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U; // 65521 is the largest prime below 2^16
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32.
// See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Processes one nibble at a time via a 16-entry (64-byte) table, trading some
// speed for a tiny cache footprint. A NULL ptr returns the initial seed
// (MZ_CRC32_INIT), matching zlib's crc32() convention.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr)
    return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    // Low nibble first, then high nibble.
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;
}

void mz_free(void *p) { MZ_FREE(p); }

#ifndef MINIZ_NO_ZLIB_APIS

// Default zalloc/zfree callbacks installed when the caller leaves the
// mz_stream's zalloc/zfree members NULL.
// NOTE(review): items * size is not checked for overflow here; the callers in
// this file only request a single fixed-size object, so it is safe as used.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}

const char *mz_version(void) { return MZ_VERSION; }

// Initializes a compressor with default options: MZ_DEFLATED method, zlib
// wrapper (MZ_DEFAULT_WINDOW_BITS), mem_level 9, default strategy.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}

// Full-control compressor initialization (zlib deflateInit2() analog).
// method must be MZ_DEFLATED; mem_level must be in [1,9] (validated but
// otherwise unused by tdefl); window_bits must be +/-MZ_DEFAULT_WINDOW_BITS
// (positive selects the zlib wrapper, negative raw deflate).
// Returns MZ_OK, MZ_STREAM_ERROR, MZ_PARAM_ERROR, or MZ_MEM_ERROR.
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);

  if (!pStream)
    return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;

  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc)
    pStream->zalloc = def_alloc_func;
  if (!pStream->zfree)
    pStream->zfree = def_free_func;

  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_MEM_ERROR;

  pStream->state = (struct mz_internal_state *)pComp;

  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }

  return MZ_OK;
}

// Resets the stream for another round of compression, reusing the existing
// compressor state object and its original flags.
int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}

// Compresses as much input as possible to the output buffer, updating
// next_in/avail_in/total_in, next_out/avail_out/total_out and adler.
// Returns MZ_OK, MZ_STREAM_END (all output flushed after MZ_FINISH),
// MZ_STREAM_ERROR, or MZ_BUF_ERROR (no forward progress possible).
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;

  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out)
    return MZ_BUF_ERROR;

  // tdefl has no distinct partial flush; treat it as a sync flush.
  if (flush == MZ_PARTIAL_FLUSH)
    flush = MZ_SYNC_FLUSH;

  // Once the compressor reported DONE, only MZ_FINISH is a valid follow-up.
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ?
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). 
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int 
mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. 
If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. 
// Fetch the next input byte into c. If the input is exhausted and the caller
// promised more (TINFL_FLAG_HAS_MORE_INPUT), the coroutine suspends with
// TINFL_STATUS_NEEDS_MORE_INPUT and retries after resuming; otherwise the
// stream is padded with zero bytes (see the TODO above).
#define TINFL_GET_BYTE(state_index, c)                                         \
  do {                                                                         \
    if (pIn_buf_cur >= pIn_buf_end) {                                          \
      for (;;) {                                                               \
        if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {                        \
          TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT);         \
          if (pIn_buf_cur < pIn_buf_end) {                                     \
            c = *pIn_buf_cur++;                                                \
            break;                                                             \
          }                                                                    \
        } else {                                                               \
          c = 0;                                                               \
          break;                                                               \
        }                                                                      \
      }                                                                        \
    } else                                                                     \
      c = *pIn_buf_cur++;                                                      \
  }                                                                            \
  MZ_MACRO_END

// Refill bit_buf one byte at a time until it holds at least n bits.
#define TINFL_NEED_BITS(state_index, n)                                        \
  do {                                                                         \
    mz_uint c;                                                                 \
    TINFL_GET_BYTE(state_index, c);                                            \
    bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                             \
    num_bits += 8;                                                             \
  } while (num_bits < (mz_uint)(n))

// Discard the low n bits of bit_buf, refilling first if necessary.
#define TINFL_SKIP_BITS(state_index, n)                                        \
  do {                                                                         \
    if (num_bits < (mz_uint)(n)) {                                             \
      TINFL_NEED_BITS(state_index, n);                                         \
    }                                                                          \
    bit_buf >>= (n);                                                           \
    num_bits -= (n);                                                           \
  }                                                                            \
  MZ_MACRO_END

// Extract the low n bits of bit_buf into b, then consume them.
#define TINFL_GET_BITS(state_index, b, n)                                      \
  do {                                                                         \
    if (num_bits < (mz_uint)(n)) {                                             \
      TINFL_NEED_BITS(state_index, n);                                         \
    }                                                                          \
    b = bit_buf & ((1 << (n)) - 1);                                            \
    bit_buf >>= (n);                                                           \
    num_bits -= (n);                                                           \
  }                                                                            \
  MZ_MACRO_END

// TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
// remaining in the input buffer falls below 2. It reads just enough bytes from
// the input stream that are needed to decode the next Huffman code (and
// absolutely no more). It works by trying to fully decode a Huffman code by
// using whatever bits are currently present in the bit buffer. If this fails,
// it reads another byte, and tries again until it succeeds or until the bit
// buffer contains >=15 bits (deflate's max. Huffman code size).
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. 
// Decode the next Huffman symbol into sym using pHuff's fast lookup table
// (TINFL_FAST_LOOKUP_BITS) with a tree walk fallback for longer codes. When
// fewer than 2 input bytes remain it falls back to the careful
// TINFL_HUFF_BITBUF_FILL path described above; otherwise it speculatively
// loads two bytes into the bit buffer.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                             \
  do {                                                                         \
    int temp;                                                                  \
    mz_uint code_len, c;                                                       \
    if (num_bits < 15) {                                                       \
      if ((pIn_buf_end - pIn_buf_cur) < 2) {                                   \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                            \
      } else {                                                                 \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) |           \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8));      \
        pIn_buf_cur += 2;                                                      \
        num_bits += 16;                                                        \
      }                                                                        \
    }                                                                          \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0)                                                                     \
      code_len = temp >> 9, temp &= 511;                                       \
    else {                                                                     \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while (temp < 0);                                                      \
    }                                                                          \
    sym = temp;                                                                \
    bit_buf >>= code_len;                                                      \
    num_bits -= code_len;                                                      \
  }                                                                            \
  MZ_MACRO_END

// The core DEFLATE (RFC 1951) inflator, written as a resumable coroutine via
// the TINFL_CR_* macros: it may suspend whenever it needs more input or has
// filled the output, and resumes exactly where it left off on the next call.
// pOut_buf_start/pOut_buf_next describe either a power-of-2 circular
// dictionary or (with TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) a linear
// buffer large enough for the whole output. On return *pIn_buf_size and
// *pOut_buf_size are updated to the number of bytes consumed/produced.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next,
                              size_t *pIn_buf_size, mz_uint8 *pOut_buf_start,
                              mz_uint8 *pOut_buf_next, size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // Length/distance base+extra-bit tables from RFC 1951, plus the code-length
  // alphabet's zigzag order and the minimum sizes of the three Huffman tables.
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23, 27, 31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,  0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0, 0,  0,  1,  1,  2,  2,  3,  3,
                                       4, 4, 5,  5,  6,  6,  7,  7,  8,  8,
                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  static const int s_min_table_sizes[3] = {257, 1, 4};

  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;

  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }

  // Restore the coroutine's registers from the decompressor state.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN

  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Validate the 2-byte zlib header (RFC 1950): checksum, no preset dict,
    // deflate method, and a window size our dictionary can satisfy.
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }

  do {
    // Per-block loop: 3-bit header = final flag + 2-bit block type.
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Stored (uncompressed) block: byte-align, then LEN/NLEN check.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // First drain bytes still sitting in the bit buffer, then bulk-copy.
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      // Block type 3 is reserved/invalid.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Fixed Huffman tables (RFC 1951 section 3.2.6).
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i)
          *p++ = 8;
        for (; i <= 255; ++i)
          *p++ = 9;
        for (; i <= 279; ++i)
          *p++ = 7;
        for (; i <= 287; ++i)
          *p++ = 8;
      } else {
        // Dynamic tables: read HLIT/HDIST/HCLEN then the 3-bit code sizes of
        // the code-length alphabet in zigzag order.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build each needed Huffman table (canonical codes -> fast lookup
      // table + overflow binary tree). For dynamic blocks this first builds
      // table 2 (code lengths), decodes the RLE'd literal/distance code
      // sizes, then builds tables 1 and 0.
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        // An over/under-subscribed code is only legal for a 1-symbol table.
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size)
            continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are stored LSB-first in the bit stream: reverse.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          if (0 ==
              (tree_cur = pTable->m_look_up[rev_code &
                                            (TINFL_FAST_LOOKUP_SIZE - 1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Decode the run-length-coded literal/length + distance code sizes
          // using table 2 (symbols 16/17/18 are repeat codes).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Main literal/length + distance decode loop for this block.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Slow/safe path near the ends of the input or output buffers.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256)
              break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            // Fast path: decode up to two literals per iteration without
            // bounds checks, refilling the bit buffer in bulk.
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256)
              break; // Length code or end-of-block: handle below.
#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 =
                     r->m_tables[0]
                         .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >=
                0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;

            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              // Second symbol was a length code: emit one literal, fall
              // through to the match handling below.
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        if ((counter &= 511) == 256)
          break; // End-of-block symbol.

        // Decode match length and distance (base + extra bits).
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }

        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }

        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }

        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);

        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Match would run past the output buffer: copy byte-by-byte with
          // wrap-around, suspending whenever the output fills up.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes at a time.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1)
                pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // Generic (possibly overlapping) match copy, 3 bytes per iteration.
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1)
            pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));

  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Read the big-endian adler-32 trailer that follows a zlib stream.
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH

common_exit:
  // Save the coroutine's registers so decompression can resume on the next
  // call, and report how much input/output was consumed/produced.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    // Incrementally fold the bytes produced this call into the running
    // adler-32 (unrolled x8; 5552 is the max byte count before the sums must
    // be reduced mod 65521 to avoid 32-bit overflow).
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i)
        s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

// Decompresses an in-memory stream into a heap buffer that is grown
// (doubling, min 128 bytes) as needed. Returns the malloc'd buffer and sets
// *pOut_len, or returns NULL on failure (caller frees with mz_free()).
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    // All input is present up front, so needing more input is a hard error.
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE)
      break;
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128)
      new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

// Decompresses an in-memory stream into a caller-supplied buffer in a single
// shot. Returns the number of output bytes written, or
// TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on any error.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status =
      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

// Decompresses an in-memory stream, delivering output through pPut_buf_func
// in dictionary-sized chunks. Returns 1 on success, 0 on failure (including
// the callback returning false); *pIn_buf_size is updated to the input
// consumed. Allocates a temporary TINFL_LZ_DICT_SIZE dictionary on the heap.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict)
    return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict,
        pDict + dict_ofs, &dst_buf_size,
        (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                   TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size,
                           pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
/* (match_len - TDEFL_MIN_MATCH_LEN) -> DEFLATE length symbol (257..285). */
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285};

/* (match_len - TDEFL_MIN_MATCH_LEN) -> number of extra bits for the length
   symbol above (last slot is symbol 285, which takes no extra bits). */
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};

/* match_dist (< 512) -> DEFLATE distance symbol. */
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
    8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};

/* match_dist (< 512) -> number of extra distance bits. */
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};

/* (match_dist >> 8) -> DEFLATE distance symbol, for distances >= 512. */
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};

/* (match_dist >> 8) -> number of extra distance bits, for distances >= 512. */
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};

// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq;

/* Radix sorts a tdefl_sym_freq[] array by its 16-bit m_key (two 8-bit passes,
   low byte first, ping-ponging between pSyms0 and pSyms1). The second pass is
   skipped when every key fits in 8 bits. Returns a pointer to the sorted
   values (either pSyms0 or pSyms1, depending on the number of passes run). */
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms,
                                             tdefl_sym_freq *pSyms0,
                                             tdefl_sym_freq *pSyms1) {
  mz_uint32 hist[256 * 2];
  mz_uint32 num_passes = 2, shift, pass, idx;
  tdefl_sym_freq *pSrc = pSyms0, *pDst = pSyms1;

  /* Build histograms for the low and high key bytes in a single sweep. */
  for (idx = 0; idx < 256 * 2; idx++) hist[idx] = 0;
  for (idx = 0; idx < num_syms; idx++) {
    mz_uint key = pSyms0[idx].m_key;
    hist[key & 0xFF]++;
    hist[256 + ((key >> 8) & 0xFF)]++;
  }

  /* If all high bytes are zero, the high-byte pass would be a no-op: drop it. */
  while ((num_passes > 1) && (num_syms == hist[(num_passes - 1) * 256]))
    num_passes--;

  for (shift = 0, pass = 0; pass < num_passes; pass++, shift += 8) {
    const mz_uint32 *pCounts = &hist[pass << 8];
    mz_uint bucket_ofs[256], total = 0;
    tdefl_sym_freq *pTmp;
    /* Prefix-sum the counts into starting offsets for each bucket. */
    for (idx = 0; idx < 256; idx++) {
      bucket_ofs[idx] = total;
      total += pCounts[idx];
    }
    /* Stable scatter by the current key byte, then swap src/dst buffers. */
    for (idx = 0; idx < num_syms; idx++)
      pDst[bucket_ofs[(pSrc[idx].m_key >> shift) & 0xFF]++] = pSrc[idx];
    pTmp = pSrc;
    pSrc = pDst;
    pDst = pTmp;
  }
  return pSrc;
}
// tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat,
// alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996.
/* In-place minimum-redundancy (Huffman) code length calculation, operating on
   an array A of n symbol frequencies sorted ascending by m_key. On return each
   A[i].m_key holds that symbol's code length. This is the Moffat/Katajainen
   in-place algorithm: A is reused first for internal-node weights, then parent
   indices, then depths — statement order is load-bearing throughout. */
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) {
  int root, leaf, next, avbl, used, dpth;
  if (n == 0)
    return;
  else if (n == 1) {
    A[0].m_key = 1;
    return;
  }
  /* Phase 1: build the tree; A[next] becomes the weight of internal node
     'next', with smaller of (next unused internal node, next leaf) merged. */
  A[0].m_key += A[1].m_key;
  root = 0;
  leaf = 2;
  for (next = 1; next < n - 1; next++) {
    if (leaf >= n || A[root].m_key < A[leaf].m_key) {
      A[next].m_key = A[root].m_key;
      A[root++].m_key = (mz_uint16)next; /* record parent index */
    } else
      A[next].m_key = A[leaf++].m_key;
    if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) {
      A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
      A[root++].m_key = (mz_uint16)next;
    } else
      A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
  }
  /* Phase 2: convert parent indices into internal-node depths. */
  A[n - 2].m_key = 0;
  for (next = n - 3; next >= 0; next--)
    A[next].m_key = A[A[next].m_key].m_key + 1;
  /* Phase 3: assign leaf depths (= code lengths) from the internal depths. */
  avbl = 1;
  used = dpth = 0;
  root = n - 2;
  next = n - 1;
  while (avbl > 0) {
    while (root >= 0 && (int)A[root].m_key == dpth) {
      used++;
      root--;
    }
    while (avbl > used) {
      A[next--].m_key = (mz_uint16)(dpth);
      avbl--;
    }
    avbl = 2 * used;
    dpth++;
    used = 0;
  }
}
// Limits canonical Huffman code table's max code size.
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

/* Clamps a code-length histogram (pNum_codes, indexed by length) so that no
   code exceeds max_code_size, preserving the Kraft equality by repeatedly
   moving one overlong code shorter and splitting a sibling. */
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  /* Kraft sum must equal 2^max_code_size for a complete code. */
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

/* Builds canonical Huffman codes for table table_num of compressor d: either
   from the existing code sizes (static_table) or from the symbol frequency
   counts in d->m_huff_count, limited to code_size_limit bits. Results go to
   d->m_huff_code_sizes / d->m_huff_codes (codes are stored bit-reversed for
   LSB-first emission). */
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    /* Collect used symbols, sort by frequency, derive optimal code lengths. */
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    /* Assign clamped lengths: longest codes go to the least frequent syms. */
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  /* Canonical code assignment per RFC 1951, then bit-reverse each code. */
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

/* Appends bits b (of length l) to the compressor's LSB-first bit buffer and
   flushes whole bytes to the output buffer. */
#define TDEFL_PUT_BITS(b, l)                               \
  do {                                                     \
    mz_uint bits = b;                                      \
    mz_uint len = l;                                       \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                 \
    d->m_bit_buffer |= (bits << d->m_bits_in);             \
    d->m_bits_in += len;                                   \
    while (d->m_bits_in >= 8) {                            \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)         \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
      d->m_bit_buffer >>= 8;                               \
      d->m_bits_in -= 8;                                   \
    }                                                      \
  }                                                        \
  MZ_MACRO_END

/* Flushes a pending run of repeated non-zero code sizes, using symbol 16
   (copy previous) when the run is long enough. */
#define TDEFL_RLE_PREV_CODE_SIZE()                                         \
  {                                                                        \
    if (rle_repeat_count) {                                                \
      if (rle_repeat_count < 3) {                                          \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                  \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);        \
        while (rle_repeat_count--)                                         \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;     \
      } else {                                                             \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);  \
        packed_code_sizes[num_packed_code_sizes++] = 16;                   \
        packed_code_sizes[num_packed_code_sizes++] =                       \
            (mz_uint8)(rle_repeat_count - 3);                              \
      }                                                                    \
      rle_repeat_count = 0;                                                \
    }                                                                      \
  }

/* Flushes a pending run of zero code sizes, using symbols 17/18 (short/long
   zero runs) when the run is long enough. */
#define TDEFL_RLE_ZERO_CODE_SIZE()                                            \
  {                                                                           \
    if (rle_z_count) {                                                        \
      if (rle_z_count < 3) {                                                  \
        d->m_huff_count[2][0] =                                               \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                 \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                         \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 17;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 3);                                      \
      } else {                                                                \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 18;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 11);                                     \
      }                                                                       \
      rle_z_count = 0;                                                        \
    }                                                                         \
  }

/* Order in which code-length code lengths are transmitted (RFC 1951 3.2.7). */
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

/* Emits the header of a dynamic-Huffman DEFLATE block: optimizes the literal/
   length and distance tables, RLE-packs their code lengths, builds the
   code-length code, and writes everything to the output bit stream. */
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
  mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;
  d->m_huff_count[0][256] = 1; /* end-of-block symbol always needs a code */
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  /* Trim trailing unused codes from both tables. */
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  /* RLE-pack the concatenated code sizes using symbols 0-18. */
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  TDEFL_PUT_BITS(2, 2); /* BTYPE = dynamic Huffman */
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]]) break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  /* Emit the packed code sizes; symbols >= 16 carry an extra-bits operand. */
  for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
  }
}

/* Emits the header of a static-Huffman DEFLATE block using the fixed code
   lengths from RFC 1951 3.2.6. */
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  TDEFL_PUT_BITS(1, 2); /* BTYPE = static Huffman */
}

/* mz_bitmasks[n] == the low n bits set, for n in 0..16. */
static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
/* Huffman-encodes the buffered LZ codes into the output bit stream. This
   variant accumulates bits in a 64-bit register and writes them with
   unaligned 64-bit stores (little-endian only). Returns MZ_FALSE if the
   output buffer is exhausted. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;
/* Unchecked bit append into the local 64-bit accumulator. */
#define TDEFL_PUT_BITS_FAST(b, l)                \
  {                                              \
    bit_buffer |= (((mz_uint64)(b)) << bits_in); \
    bits_in += (l);                              \
  }
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100; /* reload the per-8-code flag byte */
    if (flags & 1) {
      /* Match: 1 length byte + 2 distance bytes in the LZ code buffer. */
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      /* Literal; opportunistically emit up to two more literals while the
         next flag bits are also 0 (keeps the 64-bit buffer busy). */
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    /* Flush whole bytes of the accumulator with one unaligned 64-bit store. */
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  /* Drain leftover bits through the slow path, then the end-of-block code. */
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
/* Portable variant of the LZ code Huffman encoder (byte-at-a-time bit
   buffer). Returns MZ_FALSE if the output buffer is exhausted. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
       // MINIZ_HAS_64BIT_REGISTERS

/* Writes one complete DEFLATE block (header + LZ codes) to the output. */
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

/* Compresses the buffered LZ codes into a finished DEFLATE block, falling
   back to a stored (raw) block if compression expanded the data, and handles
   zlib header/trailer emission and routing output to the user's callback or
   buffer. Returns the number of output bytes still pending delivery, or a
   negative tdefl status on callback failure. */
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  /* Compress straight into the caller's buffer when it's big enough. */
  mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  /* Finalize the partially-filled LZ flag byte. */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8); /* zlib CMF/FLG header */
    TDEFL_PUT_BITS(0x01, 8);
  }
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1); /* BFINAL bit */
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) && ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    TDEFL_PUT_BITS(0, 2); /* BTYPE = stored */
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    /* LEN then NLEN (one's complement), per RFC 1951 stored-block format. */
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        /* zlib trailer: adler32 of the source, big-endian. */
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      /* Non-final flush: emit an empty stored block to byte-align output. */
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  /* Reset per-block state for the next block. */
  memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  /* Deliver the produced bytes: callback, caller buffer, or internal buffer
     (with any overflow left pending in m_output_flush_remaining). */
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
/* Finds the longest match for the data at lookahead_pos in the dictionary,
   probing the hash chain. This variant compares two bytes at a time with
   unaligned 16-bit loads. On success updates *pMatch_dist / *pMatch_len
   (only when a match longer than the incoming *pMatch_len is found). */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint
    max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  /* c01 = the two bytes that must match at the end of any better match;
     s01 = the first two bytes of the lookahead. */
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
/* Walk one hash-chain link; returns (gives up) when the chain ends or the
   candidate is out of range; breaks when the tail bytes pre-qualify. */
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    p = s;
    probe_len = 32;
    /* Compare 8 bytes per iteration, two at a time (up to 32*8 = 256 bytes,
       which covers TDEFL_MAX_MATCH_LEN). */
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      /* Ran the full comparison without a mismatch: maximal match. */
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
/* Portable byte-at-a-time variant of tdefl_find_match (same contract). */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                      \
  next_probe_pos = d->m_next[probe_pos];                                 \
  if ((!next_probe_pos) ||                                               \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return;                                                              \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                  \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                        \
      (d->m_dict[probe_pos + match_len - 1] == c1))                      \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while 
((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? 
MZ_FALSE : MZ_TRUE;
          // tdefl_flush_block() may have rewound the LZ code buffer; reload
          // the locally cached copies of the output-buffer state.
          total_lz_bytes = d->m_total_lz_bytes;
          pLZ_code_buf = d->m_pLZ_code_buf;
          pLZ_flags = d->m_pLZ_flags;
          num_flags_left = d->m_num_flags_left;
        }
      }
    }

  // Write the locally cached dictionary/LZ state back to the compressor
  // before returning.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends one literal byte to the LZ code buffer: shifts a 0 flag bit into
// the current control-flags byte, reserves a fresh flags byte every 8 codes,
// and bumps the literal/length Huffman histogram.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1); // 0 bit = literal
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++; // reserve the next flags byte
  }
  d->m_huff_count[0][lit]++;
}

// Appends a (length, distance) match to the LZ code buffer as one length
// byte plus a 16-bit little-endian distance, shifts a 1 flag bit into the
// flags byte, and updates the length and distance Huffman histograms.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1; // stored distance is biased by 1
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80); // 1 bit = match
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  // Distance symbol: small table for dist < 512, large table otherwise.
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = 
((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if 
(!cur_match_dist) tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]); else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128)) { tdefl_record_match(d, cur_match_len, cur_match_dist); len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } // Move the lookahead forward by len_to_move bytes. d->m_lookahead_pos += len_to_move; MZ_ASSERT(d->m_lookahead_size >= len_to_move); d->m_lookahead_size -= len_to_move; d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE); // Check if it's time to flush the current LZ codes to the internal output // buffer. if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) || ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) { int n; d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; } } d->m_pSrc = pSrc; d->m_src_buf_left = src_buf_left; return MZ_TRUE; } static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) { if (d->m_pIn_buf_size) { *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf; } if (d->m_pOut_buf_size) { size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining); memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n); d->m_output_flush_ofs += (mz_uint)n; d->m_output_flush_remaining -= (mz_uint)n; d->m_out_buf_ofs += n; *d->m_pOut_buf_size = d->m_out_buf_ofs; } return (d->m_finished && !d->m_output_flush_remaining) ? 
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY; } tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush) { if (!d) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return TDEFL_STATUS_BAD_PARAM; } d->m_pIn_buf = pIn_buf; d->m_pIn_buf_size = pIn_buf_size; d->m_pOut_buf = pOut_buf; d->m_pOut_buf_size = pOut_buf_size; d->m_pSrc = (const mz_uint8 *)(pIn_buf); d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0; d->m_out_buf_ofs = 0; d->m_flush = flush; if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) || (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf)) { if (pIn_buf_size) *pIn_buf_size = 0; if (pOut_buf_size) *pOut_buf_size = 0; return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM); } d->m_wants_to_finish |= (flush == TDEFL_FINISH); if ((d->m_output_flush_remaining) || (d->m_finished)) return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) && ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) && ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0)) { if (!tdefl_compress_fast(d)) return d->m_prev_return_status; } else #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN { if (!tdefl_compress_normal(d)) return d->m_prev_return_status; } if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf)) d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf); if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining)) { if (tdefl_flush_block(d, flush) < 0) return 
d->m_prev_return_status; d->m_finished = (flush == TDEFL_FINISH); if (flush == TDEFL_FULL_FLUSH) { MZ_CLEAR_OBJ(d->m_hash); MZ_CLEAR_OBJ(d->m_next); d->m_dict_size = 0; } } return (d->m_prev_return_status = tdefl_flush_output_buffer(d)); } tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush) { MZ_ASSERT(d->m_pPut_buf_func); return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush); } tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags) { d->m_pPut_buf_func = pPut_buf_func; d->m_pPut_buf_user = pPut_buf_user; d->m_flags = (mz_uint)(flags); d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3; d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0; d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3; if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash); d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0; d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0; d->m_pLZ_code_buf = d->m_lz_code_buf + 1; d->m_pLZ_flags = d->m_lz_code_buf; d->m_num_flags_left = 8; d->m_pOutput_buf = d->m_output_buf; d->m_pOutput_buf_end = d->m_output_buf; d->m_prev_return_status = TDEFL_STATUS_OKAY; d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0; d->m_adler32 = 1; d->m_pIn_buf = NULL; d->m_pOut_buf = NULL; d->m_pIn_buf_size = NULL; d->m_pOut_buf_size = NULL; d->m_flush = TDEFL_NO_FLUSH; d->m_pSrc = NULL; d->m_src_buf_left = 0; d->m_out_buf_ofs = 0; memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0); memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1); return TDEFL_STATUS_OKAY; } tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) { return d->m_prev_return_status; 
}

// Returns the adler-32 of the source data consumed so far.
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

// One-shot compression of pBuf/buf_len through a caller-supplied output
// callback. Heap-allocates a temporary tdefl_compressor, compresses with
// TDEFL_FINISH, frees it, and returns MZ_TRUE only if the stream completed.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
    return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded =
      succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                    TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

// Memory output sink used by the mem-to-heap/mem-to-mem helpers.
typedef struct {
  size_t m_size, m_capacity; // bytes written so far / bytes allocated
  mz_uint8 *m_pBuf;
  mz_bool m_expandable; // if false, writes past m_capacity fail
} tdefl_output_buffer;

// tdefl_put_buf_func_ptr adapter that appends to a tdefl_output_buffer,
// doubling its capacity (minimum 128 bytes) as needed when expandable.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable)
      return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf)
      return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

// Compresses pSrc_buf into a freshly heap-allocated buffer; the caller owns
// the returned pointer and must MZ_FREE() it. *pOut_len receives the
// compressed size. Returns NULL on failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE; // NOTE(review): MZ_FALSE (0) doubles as NULL here
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

// Compresses pSrc_buf into the caller-provided fixed-size buffer pOut_buf;
// returns the compressed size in bytes, or 0 on failure/overflow.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf)
return 0; out_buf.m_pBuf = (mz_uint8 *)pOut_buf; out_buf.m_capacity = out_buf_len; if (!tdefl_compress_mem_to_output( pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags)) return 0; return out_buf.m_size; } #ifndef MINIZ_NO_ZLIB_APIS static const mz_uint s_tdefl_num_probes[11] = {0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; // level may actually range from [0,10] (10 is a "hidden" max level, where we // want a bit more compression and it's fine if throughput to fall off a cliff // on some files). mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy) { mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0); if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER; if (!level) comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; else if (strategy == MZ_FILTERED) comp_flags |= TDEFL_FILTER_MATCHES; else if (strategy == MZ_HUFFMAN_ONLY) comp_flags &= ~TDEFL_MAX_PROBES_MASK; else if (strategy == MZ_FIXED) comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; else if (strategy == MZ_RLE) comp_flags |= TDEFL_RLE_MATCHES; return comp_flags; } #endif // MINIZ_NO_ZLIB_APIS #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif // Simple PNG writer function by Alex Evans, 2011. Released into the public // domain: https://gist.github.com/908299, more context at // http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/. 
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) 
#define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, 
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool 
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, 
this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, 
mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], 
pIndices[child]); root = child; } end--; } } static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint32 flags) { mz_uint cdir_size, num_this_disk, cdir_disk_index; mz_uint64 cdir_ofs; mz_int64 cur_file_ofs; const mz_uint8 *p; mz_uint32 buf_u32[4096 / sizeof(mz_uint32)]; mz_uint8 *pBuf = (mz_uint8 *)buf_u32; mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0); // Basic sanity checks - reject files which are too small, and check the first // 4 bytes of the file to make sure a local header is there. if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; // Find the end of central directory record by scanning the file from the end // towards the beginning. cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0); for (;;) { int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs); if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n) return MZ_FALSE; for (i = n - 4; i >= 0; --i) if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break; if (i >= 0) { cur_file_ofs += i; break; } if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE))) return MZ_FALSE; cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0); } // Read and verify the end of central directory record. 
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) || ((pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) != MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS))) return MZ_FALSE; num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS); cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS); if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1))) return MZ_FALSE; if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) return MZ_FALSE; cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS); if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size) return MZ_FALSE; pZip->m_central_directory_file_ofs = cdir_ofs; if (pZip->m_total_files) { mz_uint i, n; // Read the entire central directory into a heap block, and allocate another // heap block to hold the unsorted central dir file record offsets, and // another to hold the sorted indices. if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) || (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE))) return MZ_FALSE; if (sort_central_dir) { if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE)) return MZ_FALSE; } if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size) return MZ_FALSE; // Now create an index into the central directory file records, do some // basic sanity checking on each record, and check for zip64 entries (which // are not yet supported). 
p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
// Walk each central dir record: record its offset, sanity-check sizes and
// disk numbers, and reject zip64 markers (0xFFFFFFFF sizes).
for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
  mz_uint total_header_size, comp_size, decomp_size, disk_index;
  if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
      (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
    return MZ_FALSE;
  MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                       i) =
      (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
  // The sorted index starts out as the identity permutation; it is sorted by
  // filename later.
  if (sort_central_dir)
    MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                         mz_uint32, i) = i;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  // Method 0 (stored) must have equal sizes; 0xFFFFFFFF marks a zip64
  // entry, which is not supported.
  if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
       (decomp_size != comp_size)) ||
      (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
      (comp_size == 0xFFFFFFFF))
    return MZ_FALSE;
  disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
  if ((disk_index != num_this_disk) && (disk_index != 1))
    return MZ_FALSE;
  // The local header plus the compressed payload must fit in the archive.
  if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
       MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Fixed header plus variable-length filename/extra/comment must fit in the
  // remaining central directory bytes.
  if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                           MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                           MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
    return MZ_FALSE;
  n -= total_header_size;
  p += total_header_size;
}
}

if (sort_central_dir)
  mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

return MZ_TRUE;
}

// Initializes a reader that pulls archive bytes through the user-supplied
// m_pRead callback (which the caller must set before calling this).
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead))
    return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// In-memory read callback: copies from the archive image held in
// pZip->m_pState->m_pMem.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
// Clamp the read so it never runs past the end of the in-memory archive;
// a read starting at or past the end returns 0 bytes.
size_t s = (file_ofs >= pZip->m_archive_size)
               ? 0
               : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
return s;
}

// Initializes a reader over an archive image already resident in memory.
// The caller retains ownership of pMem, which must stay valid for the
// lifetime of the reader.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// stdio-backed read callback: seeks only when the current file position
// differs from the requested offset, then reads from pState->m_pFile.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  // Reject offsets that don't fit in mz_int64, and fail if the seek fails.
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Opens pFilename ("rb") and initializes a reader over it. On any failure
// the file is closed / the reader is torn down before returning MZ_FALSE.
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile)
    return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Returns the number of files in the archive (0 for a NULL archive).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  return pZip ?
pZip->m_total_files : 0;
}

// Returns a pointer to the central directory header of file_index, or NULL
// if the archive/state/index/mode is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}

// Reports whether the entry's general-purpose bit flag has the encryption
// bit (bit 0) set.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

// Heuristically determines whether the entry is a directory: a trailing '/'
// in the filename, or the DOS directory bit (0x10) in the external
// attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;

  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }

  // Bugfix: This code was also checking if the internal attribute was
  // non-zero, which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in
  // the low 16-bits, so check for the DOS directory flag and ignore the
  // source OS ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the
  // filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0)
    return MZ_TRUE;

  return MZ_FALSE;
}

// Fills *pStat with the metadata of file_index decoded from its central
// directory record. Returns MZ_FALSE on an invalid index or NULL pStat.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat))
    return MZ_FALSE;

  // Unpack the central directory record.
// Fixed-size numeric fields, all little-endian in the record.
pStat->m_file_index = file_index;
pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
    &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
// Convert the DOS-format time/date pair into a time_t.
pStat->m_time =
    mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                         MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);

// Copy as much of the filename and comment as possible.
// Filename: truncate to the stat buffer's capacity and NUL-terminate.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
pStat->m_filename[n] = '\0';

// Comment: it follows the filename and extra data in the record.
n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
pStat->m_comment_size = n;
memcpy(pStat->m_comment,
       p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
           MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
           MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
       n);
pStat->m_comment[n] = '\0';

return MZ_TRUE;
}

// Copies the entry's filename into pFilename (NUL-terminated, truncated to
// filename_buf_size). Returns the buffer size needed to hold the full name
// including the terminator, or 0 on an invalid index.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

// Compares two length-delimited strings: byte-exact when
// MZ_ZIP_FLAG_CASE_SENSITIVE is set, otherwise case-insensitively.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  return MZ_TRUE;
}

// Case-insensitively compares the filename of central-dir entry l_index with
// (pR, r_len); yields a <0/0/>0 ordering result like strcmp.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                    pCentral_dir_array, mz_uint8,
                    MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                         l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  // Walk both names until a mismatch or the shorter name is exhausted.
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefix: the shorter name orders first; otherwise order by the
  // first differing character.
  return (pL == pE) ?
(int)(l_len - r_len) : (l - r);
}

// Binary-searches the filename-sorted central directory index for pFilename
// (case-insensitive). Returns the file index, or -1 if not found.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets,
                                              file_index, pFilename,
                                              filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

// Locates a file by name (and optionally comment). Uses the sorted index for
// a fast binary search when possible, otherwise falls back to a linear scan.
// Returns the file index, or -1 if not found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // The binary search only handles the default case: path-sensitive,
  // case-insensitive matching, no comment filter, sorted index available.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) ==
       0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF)
    return -1;
  comment_len = pComment ?
strlen(pComment) : 0;
if (comment_len > 0xFFFF)
  return -1;
// Linear scan over every central directory record.
for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
  const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
  mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  const char *pFilename =
      (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  if (filename_len < name_len)
    continue;
  // When a comment filter was supplied, it must match exactly.
  if (comment_len) {
    mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
            file_comment_len =
                MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
    const char *pFile_comment = pFilename + filename_len + file_extra_len;
    if ((file_comment_len != comment_len) ||
        (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                     file_comment_len, flags)))
      continue;
  }
  // MZ_ZIP_FLAG_IGNORE_PATH: compare against the basename only - strip
  // everything up to the last '/', '\\' or ':'.
  if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
    int ofs = filename_len - 1;
    do {
      if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
          (pFilename[ofs] == ':'))
        break;
    } while (--ofs >= 0);
    ofs++;
    pFilename += ofs;
    filename_len -= ofs;
  }
  if ((filename_len == name_len) &&
      (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
    return file_index;
}
return -1;
}

// Extracts file_index into the caller-supplied pBuf without allocating the
// output. pUser_read_buf, if non-NULL, is used as the compressed-data
// staging buffer when reading from a non-memory-backed archive.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                       read_buf_size, read_buf_ofs = 0,
                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // Stack space for one local directory header, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) -
                        1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf))
    return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;

  // Empty file, or a directory (but not
always a directory - I've seen odd zips
// with directories that have compressed data which inflates to 0 bytes)
if (!file_stat.m_comp_size)
  return MZ_TRUE;

// Entry is a subdirectory (I've seen old zips with dir entries which have
// compressed deflate data which inflates to 0 bytes, but these entries claim
// to uncompress to 512 bytes in the headers).
// I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index))
  return MZ_TRUE;

// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32))
  return MZ_FALSE;

// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
    (file_stat.m_method != MZ_DEFLATED))
  return MZ_FALSE;

// Ensure supplied output buffer is large enough.
needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                    : file_stat.m_uncomp_size;
if (buf_size < needed_size)
  return MZ_FALSE;

// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                  MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
    MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
  return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
  return MZ_FALSE;

// The compressed payload starts after the local header, filename and extra
// data; it must fit inside the archive.
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
  return MZ_FALSE;

if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
  // The file is stored or the caller has requested the compressed data.
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                  (size_t)needed_size) != needed_size)
  return MZ_FALSE;
// Raw (compressed) reads can't be CRC-checked; otherwise verify the CRC now.
return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
       (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                 (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
}

// Decompress the file either directly from memory or from a file input
// buffer.
tinfl_init(&inflator);

if (pZip->m_pState->m_pMem) {
  // Read directly from the archive in memory.
  pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
  read_buf_size = read_buf_avail = file_stat.m_comp_size;
  comp_remaining = 0;
} else if (pUser_read_buf) {
  // Use a user provided read buffer.
  if (!user_read_buf_size)
    return MZ_FALSE;
  pRead_buf = (mz_uint8 *)pUser_read_buf;
  read_buf_size = user_read_buf_size;
  read_buf_avail = 0;
  comp_remaining = file_stat.m_comp_size;
} else {
  // Temporarily allocate a read buffer.
  read_buf_size =
      MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
      (read_buf_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
    return MZ_FALSE;
  if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                          (size_t)read_buf_size)))
    return MZ_FALSE;
  read_buf_avail = 0;
  comp_remaining = file_stat.m_comp_size;
}

// Inflate loop: refill the input buffer as needed and decompress straight
// into the caller's (non-wrapping) output buffer.
do {
  size_t in_buf_size,
      out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
  if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
    read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
      status = TINFL_STATUS_FAILED;
      break;
    }
    cur_file_ofs += read_buf_avail;
    comp_remaining -= read_buf_avail;
    read_buf_ofs = 0;
  }
  in_buf_size = (size_t)read_buf_avail;
  status = tinfl_decompress(
      &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
      (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
    (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;
out_buf_ofs += out_buf_size;
} while (status == TINFL_STATUS_NEEDS_MORE_INPUT);

if (status == TINFL_STATUS_DONE) {
  // Make sure the entire file was decompressed, and check its CRC.
  if ((out_buf_ofs != file_stat.m_uncomp_size) ||
      (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
    status = TINFL_STATUS_FAILED;
}

// Free the read buffer only if this function allocated it.
if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
  pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

return status == TINFL_STATUS_DONE;
}

// Name-based wrapper around mz_zip_reader_extract_to_mem_no_alloc().
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags,
                                               pUser_read_buf,
                                               user_read_buf_size);
}

// Extracts by index into pBuf using an internally-managed read buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}

// Extracts by name into pBuf using an internally-managed read buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}

// Extracts file_index into a freshly allocated heap buffer; *pSize receives
// the byte count. The caller owns the returned buffer (free with
// pZip->m_pFree). Returns NULL on failure.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;

  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;

  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ?
comp_size : uncomp_size;
// On builds with a 32-bit size_t, refuse allocations the platform can't
// represent.
#ifdef _MSC_VER
if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
  return NULL;
if (NULL ==
    (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
  return NULL;

if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                  flags)) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
  return NULL;
}

if (pSize)
  *pSize = (size_t)alloc_size;
return pBuf;
}

// Name-based wrapper around mz_zip_reader_extract_to_heap().
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    return MZ_FALSE;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}

// Streams the decompressed (or raw) contents of file_index to pCallback in
// chunks, so the whole output never needs to be in memory at once.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
                           out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  // Stack space for one local directory header, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) -
                        1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd
  // zips with directories that have compressed data which inflates to 0
  // bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries
  // claim to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
if (mz_zip_reader_is_file_a_directory(pZip, file_index))
  return MZ_TRUE;

// Encryption and patch files are not supported.
if (file_stat.m_bit_flag & (1 | 32))
  return MZ_FALSE;

// This function only supports stored and deflate.
if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
    (file_stat.m_method != MZ_DEFLATED))
  return MZ_FALSE;

// Read and parse the local directory entry.
cur_file_ofs = file_stat.m_local_header_ofs;
if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                  MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
    MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
  return MZ_FALSE;
if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
  return MZ_FALSE;

// The payload begins after the local header, filename and extra data; it
// must fit inside the archive.
cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
  return MZ_FALSE;

// Decompress the file either directly from memory or from a file input
// buffer.
if (pZip->m_pState->m_pMem) {
  pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
  read_buf_size = read_buf_avail = file_stat.m_comp_size;
  comp_remaining = 0;
} else {
  read_buf_size =
      MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
  if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                          (size_t)read_buf_size)))
    return MZ_FALSE;
  read_buf_avail = 0;
  comp_remaining = file_stat.m_comp_size;
}

if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
  // The file is stored or the caller has requested the compressed data.
if (pZip->m_pState->m_pMem) {
  // Memory-backed archive: hand the whole payload to the callback at once.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
      (file_stat.m_comp_size > 0xFFFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) &&
      (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
    return MZ_FALSE;
  if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
    status = TINFL_STATUS_FAILED;
  else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
    file_crc32 =
        (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                            (size_t)file_stat.m_comp_size);
  cur_file_ofs += file_stat.m_comp_size;
  out_buf_ofs += file_stat.m_comp_size;
  comp_remaining = 0;
} else {
  // File-backed archive: stream the payload through the read buffer.
  while (comp_remaining) {
    read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
      status = TINFL_STATUS_FAILED;
      break;
    }

    if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
      file_crc32 = (mz_uint32)mz_crc32(
          file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);

    if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                  (size_t)read_buf_avail) != read_buf_avail) {
      status = TINFL_STATUS_FAILED;
      break;
    }
    cur_file_ofs += read_buf_avail;
    out_buf_ofs += read_buf_avail;
    comp_remaining -= read_buf_avail;
  }
}
} else {
  tinfl_decompressor inflator;
  tinfl_init(&inflator);

  // Deflated entry: decompress through a dictionary-sized circular window,
  // flushing each produced chunk to the callback.
  if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                           TINFL_LZ_DICT_SIZE)))
    status = TINFL_STATUS_FAILED;
  else {
    do {
      mz_uint8 *pWrite_buf_cur =
          (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
      size_t in_buf_size,
          out_buf_size =
              TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
      if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
        read_buf_ofs = 0;
      }

      in_buf_size =
(size_t)read_buf_avail;
status = tinfl_decompress(
    &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
    (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
    comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
read_buf_avail -= in_buf_size;
read_buf_ofs += in_buf_size;

if (out_buf_size) {
  // Flush the newly decompressed chunk and fold it into the running CRC.
  if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
      out_buf_size) {
    status = TINFL_STATUS_FAILED;
    break;
  }
  file_crc32 =
      (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
  // Guard against output exceeding the size recorded in the headers.
  if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
    status = TINFL_STATUS_FAILED;
    break;
  }
}
} while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
         (status == TINFL_STATUS_HAS_MORE_OUTPUT));
}
}

if ((status == TINFL_STATUS_DONE) &&
    (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
  // Make sure the entire file was decompressed, and check its CRC.
  if ((out_buf_ofs != file_stat.m_uncomp_size) ||
      (file_crc32 != file_stat.m_crc32))
    status = TINFL_STATUS_FAILED;
}

// The read buffer is only owned here when the archive is not memory-backed;
// the write (window) buffer is always owned here when allocated.
if (!pZip->m_pState->m_pMem)
  pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
if (pWrite_buf)
  pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

return status == TINFL_STATUS_DONE;
}

// Name-based wrapper around mz_zip_reader_extract_to_callback().
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                           pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
// Write callback that appends to a stdio FILE; the offset parameter is
// ignored because extraction writes sequentially.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

// Extracts file_index to the filesystem path pDst_filename, propagating the
// archive's recorded timestamp when possible.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename,
"wb");
if (!pFile)
  return MZ_FALSE;
status = mz_zip_reader_extract_to_callback(
    pZip, file_index, mz_zip_file_write_callback, pFile, flags);
// EOF from fclose means the final flush failed - report failure even if the
// extraction itself succeeded.
if (MZ_FCLOSE(pFile) == EOF)
  return MZ_FALSE;
#ifndef MINIZ_NO_TIME
// Apply the archive's recorded modification time to the extracted file.
if (status)
  mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
return status;
}
#endif // #ifndef MINIZ_NO_STDIO

// Tears down a reader: frees the central directory arrays, closes any stdio
// file, and releases the internal state block.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;

  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif // #ifndef MINIZ_NO_STDIO

    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Locates pArchive_filename in the archive and extracts it to pDst_filename.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename,
                                       flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Stores v at p in little-endian byte order (16-bit).
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
// Stores v at p in little-endian byte order (32-bit).
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initializes a writer over a user-supplied m_pWrite callback.
// existing_size is the amount of data already present before the archive
// data begins (e.g. a self-extractor stub).
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) ||
(pZip->m_pState) || (!pZip->m_pWrite) ||
    (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
  return MZ_FALSE;

if (pZip->m_file_offset_alignment) {
  // Ensure user specified file offset alignment is a power of 2.
  if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
    return MZ_FALSE;
}

// Fill in default allocators for any the caller left NULL.
if (!pZip->m_pAlloc)
  pZip->m_pAlloc = def_alloc_func;
if (!pZip->m_pFree)
  pZip->m_pFree = def_free_func;
if (!pZip->m_pRealloc)
  pZip->m_pRealloc = def_realloc_func;

pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
pZip->m_archive_size = existing_size;
pZip->m_central_directory_file_ofs = 0;
pZip->m_total_files = 0;

if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                 pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
  return MZ_FALSE;
memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                              sizeof(mz_uint8));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                              sizeof(mz_uint32));
MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                              sizeof(mz_uint32));
return MZ_TRUE;
}

// Heap-backed write callback: grows the in-memory archive as needed and
// copies the new data in at file_ofs.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    // Grow geometrically to amortize realloc cost.
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL ==
        (pNew_block = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pState->m_pMem,
                                       1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size =
(size_t)new_size;
return n;
}

// Initializes a writer that builds the archive in a growable heap block.
// size_to_reserve_at_beginning bytes are left untouched at the front.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// stdio-backed write callback: seeks only when the current file position
// differs from the requested offset, then writes to pState->m_pFile.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Creates pFilename ("wb") and initializes a writer over it, optionally
// reserving size_to_reserve_at_beginning zero bytes at the front.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    // Reserve the leading space by writing zero-filled chunks.
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif // #ifndef MINIZ_NO_STDIO

// Converts a reader into a writer in place, so new files can be appended to
// an existing archive.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
mz_zip_internal_state *pState;
if ((!pZip) || (!pZip->m_pState) ||
    (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
  return MZ_FALSE;
// No sense in trying to write to an archive that's already at the support
// max size
if ((pZip->m_total_files == 0xFFFF) ||
    ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
  return MZ_FALSE;

pState = pZip->m_pState;

if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
  pFilename;
  return MZ_FALSE;
#else
  // Archive is being read from stdio - try to reopen as writable.
  if (pZip->m_pIO_opaque != pZip)
    return MZ_FALSE;
  if (!pFilename)
    return MZ_FALSE;
  pZip->m_pWrite = mz_zip_file_write_func;
  if (NULL ==
      (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
    // The mz_zip_archive is now in a bogus state because pState->m_pFile is
    // NULL, so just close it.
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
#endif // #ifdef MINIZ_NO_STDIO
} else if (pState->m_pMem) {
  // Archive lives in a memory block. Assume it's from the heap that we can
  // resize using the realloc callback.
  if (pZip->m_pIO_opaque != pZip)
    return MZ_FALSE;
  pState->m_mem_capacity = pState->m_mem_size;
  pZip->m_pWrite = mz_zip_heap_write_func;
}
// Archive is being read via a user provided read function - make sure the
// user has specified a write function too.
else if (!pZip->m_pWrite)
  return MZ_FALSE;

// Start writing new files at the archive's current central directory
// location.
pZip->m_archive_size = pZip->m_central_directory_file_ofs; pZip->m_zip_mode = MZ_ZIP_MODE_WRITING; pZip->m_central_directory_file_ofs = 0; return MZ_TRUE; } mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags) { return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0); } typedef struct { mz_zip_archive *m_pZip; mz_uint64 m_cur_archive_file_ofs; mz_uint64 m_comp_size; } mz_zip_writer_add_state; static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser) { mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser; if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len) return MZ_FALSE; pState->m_cur_archive_file_ofs += len; pState->m_comp_size += len; return MZ_TRUE; } static mz_bool mz_zip_writer_create_local_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date) { (void)pZip; memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size); return MZ_TRUE; } static mz_bool mz_zip_writer_create_central_dir_header( mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { (void)pZip; memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || 
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 
local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. (A good idea if we're doing // an in-place modification.) 
if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, 
buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return 
MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, 
cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? 
TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if 
(NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); 
return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = 
cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if 
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or distribute
this software, either in source code form or as a compiled binary, for any
purpose, commercial or non-commercial, and by any means.

In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the
public domain. We make this dedication for the benefit of the public at large
and to the detriment of our heirs and successors. We intend this dedication
to be an overt act of relinquishment in perpetuity of all present and future
rights to this software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
*/

// ---------------------- end of miniz ----------------------------------------

#ifdef __clang__
#pragma clang diagnostic pop
#endif

#ifdef _MSC_VER
#pragma warning(pop)
#endif
}  // namespace miniz
#else

// Reuse MINIZ_LITTLE_ENDIAN macro

#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char 
*>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. union FP32 { unsigned int u; float f; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 23; unsigned int Exponent : 8; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 8; unsigned int Mantissa : 23; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpadded" #endif union FP16 { unsigned short u; struct { #if MINIZ_LITTLE_ENDIAN unsigned int Mantissa : 10; unsigned int Exponent : 5; unsigned int Sign : 1; #else unsigned int Sign : 1; unsigned int Exponent : 5; unsigned int Mantissa : 10; #endif } s; }; #ifdef __clang__ #pragma clang diagnostic pop #endif static FP32 half_to_float(FP16 h) { static const FP32 magic = {113 << 23}; static const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift FP32 o; o.u = (h.u & 0x7fffU) << 13U; // exponent/mantissa bits unsigned int exp_ = shifted_exp & o.u; // just the exponent o.u += (127 - 15) << 23; // exponent adjust // handle exponent special cases if (exp_ == shifted_exp) // Inf/NaN? 
o.u += (128 - 16) << 23; // extra exp adjust else if (exp_ == 0) // Zero/Denormal? { o.u += 1 << 23; // extra exp adjust o.f -= magic.f; // renormalize } o.u |= (h.u & 0x8000U) << 16U; // sign bit return o; } static FP16 float_to_half_full(FP32 f) { FP16 o = {0}; // Based on ISPC reference code (with minor modifications) if (f.s.Exponent == 0) // Signed zero/denormal (which will underflow) o.s.Exponent = 0; else if (f.s.Exponent == 255) // Inf or NaN (all exponent bits set) { o.s.Exponent = 31; o.s.Mantissa = f.s.Mantissa ? 0x200 : 0; // NaN->qNaN and Inf->Inf } else // Normalized number { // Exponent unbias the single, then bias the halfp int newexp = f.s.Exponent - 127 + 15; if (newexp >= 31) // Overflow, return signed infinity o.s.Exponent = 31; else if (newexp <= 0) // Underflow { if ((14 - newexp) <= 24) // Mantissa might be non-zero { unsigned int mant = f.s.Mantissa | 0x800000; // Hidden 1 bit o.s.Mantissa = mant >> (14 - newexp); if ((mant >> (13 - newexp)) & 1) // Check for rounding o.u++; // Round, might overflow into exp bit, but this is OK } } else { o.s.Exponent = static_cast<unsigned int>(newexp); o.s.Mantissa = f.s.Mantissa >> 13; if (f.s.Mantissa & 0x1000) // Check for rounding o.u++; // Round, might overflow to inf, this is OK } } o.s.Sign = f.s.Sign; return o; } // NOTE: From OpenEXR code // #define IMF_INCREASING_Y 0 // #define IMF_DECREASING_Y 1 // #define IMF_RAMDOM_Y 2 // // #define IMF_NO_COMPRESSION 0 // #define IMF_RLE_COMPRESSION 1 // #define IMF_ZIPS_COMPRESSION 2 // #define IMF_ZIP_COMPRESSION 3 // #define IMF_PIZ_COMPRESSION 4 // #define IMF_PXR24_COMPRESSION 5 // #define IMF_B44_COMPRESSION 6 // #define IMF_B44A_COMPRESSION 7 #ifdef __clang__ #pragma clang diagnostic push #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #endif static const char *ReadString(std::string *s, const char *ptr, size_t len) { // Read untile NULL(\0). 
const char *p = ptr; const char *q = ptr; while ((size_t(q - ptr) < len) && (*q) != 0) { q++; } if (size_t(q - ptr) >= len) { (*s) = std::string(); return NULL; } (*s) = std::string(p, q); return q + 1; // skip '\0' } static bool ReadAttribute(std::string *name, std::string *type, std::vector<unsigned char> *data, size_t *marker_size, const char *marker, size_t size) { size_t name_len = strnlen(marker, size); if (name_len == size) { // String does not have a terminating character. return false; } *name = std::string(marker, name_len); marker += name_len + 1; size -= name_len + 1; size_t type_len = strnlen(marker, size); if (type_len == size) { return false; } *type = std::string(marker, type_len); marker += type_len + 1; size -= type_len + 1; if (size < sizeof(uint32_t)) { return false; } uint32_t data_len; memcpy(&data_len, marker, sizeof(uint32_t)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len == 0) { if ((*type).compare("string") == 0) { // Accept empty string attribute. 
marker += sizeof(uint32_t); size -= sizeof(uint32_t); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t); data->resize(1); (*data)[0] = '\0'; return true; } else { return false; } } marker += sizeof(uint32_t); size -= sizeof(uint32_t); if (size < data_len) { return false; } data->resize(static_cast<size_t>(data_len)); memcpy(&data->at(0), marker, static_cast<size_t>(data_len)); *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len; return true; } static void WriteAttributeToMemory(std::vector<unsigned char> *out, const char *name, const char *type, const unsigned char *data, int len) { out->insert(out->end(), name, name + strlen(name) + 1); out->insert(out->end(), type, type + strlen(type) + 1); int outLen = len; tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen)); out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen), reinterpret_cast<unsigned char *>(&outLen) + sizeof(int)); out->insert(out->end(), data, data + len); } typedef struct { std::string name; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } ChannelInfo; typedef struct { std::vector<tinyexr::ChannelInfo> channels; std::vector<EXRAttribute> attributes; int data_window[4]; int line_order; int display_window[4]; float screen_window_center[2]; float screen_window_width; float pixel_aspect_ratio; int chunk_count; // Tiled format int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; unsigned int header_len; int compression_type; void clear() { channels.clear(); attributes.clear(); data_window[0] = 0; data_window[1] = 0; data_window[2] = 0; data_window[3] = 0; line_order = 0; display_window[0] = 0; display_window[1] = 0; display_window[2] = 0; display_window[3] = 0; screen_window_center[0] = 0.0f; screen_window_center[1] = 0.0f; screen_window_width = 0.0f; pixel_aspect_ratio = 0.0f; chunk_count = 0; // Tiled format tile_size_x = 0; tile_size_y = 0; tile_level_mode 
= 0; tile_rounding_mode = 0; header_len = 0; compression_type = 0; } } HeaderInfo; static bool ReadChannelInfo(std::vector<ChannelInfo> &channels, const std::vector<unsigned char> &data) { const char *p = reinterpret_cast<const char *>(&data.at(0)); for (;;) { if ((*p) == 0) { break; } ChannelInfo info; tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) - (p - reinterpret_cast<const char *>(data.data())); if (data_len < 0) { return false; } p = ReadString(&info.name, p, size_t(data_len)); if ((p == NULL) && (info.name.empty())) { // Buffer overrun. Issue #51. return false; } const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16; if (data_end >= (data.data() + data.size())) { return false; } memcpy(&info.pixel_type, p, sizeof(int)); p += 4; info.p_linear = static_cast<unsigned char>(p[0]); // uchar p += 1 + 3; // reserved: uchar[3] memcpy(&info.x_sampling, p, sizeof(int)); // int p += 4; memcpy(&info.y_sampling, p, sizeof(int)); // int p += 4; tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling)); channels.push_back(info); } return true; } static void WriteChannelInfo(std::vector<unsigned char> &data, const std::vector<ChannelInfo> &channels) { size_t sz = 0; // Calculate total size. 
for (size_t c = 0; c < channels.size(); c++) { sz += strlen(channels[c].name.c_str()) + 1; // +1 for \0 sz += 16; // 4 * int } data.resize(sz + 1); unsigned char *p = &data.at(0); for (size_t c = 0; c < channels.size(); c++) { memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str())); p += strlen(channels[c].name.c_str()); (*p) = '\0'; p++; int pixel_type = channels[c].pixel_type; int x_sampling = channels[c].x_sampling; int y_sampling = channels[c].y_sampling; tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling)); memcpy(p, &pixel_type, sizeof(int)); p += sizeof(int); (*p) = channels[c].p_linear; p += 4; memcpy(p, &x_sampling, sizeof(int)); p += sizeof(int); memcpy(p, &y_sampling, sizeof(int)); p += sizeof(int); } (*p) = '\0'; } static void CompressZip(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. 
// { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } #if TINYEXR_USE_MINIZ // // Compress the data using miniz // miniz::mz_ulong outSize = miniz::mz_compressBound(src_size); int ret = miniz::mz_compress( dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)), src_size); assert(ret == miniz::MZ_OK); (void)ret; compressedSize = outSize; #else uLong outSize = compressBound(static_cast<uLong>(src_size)); int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)), src_size); assert(ret == Z_OK); compressedSize = outSize; #endif // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressZip(unsigned char *dst, unsigned long *uncompressed_size /* inout */, const unsigned char *src, unsigned long src_size) { if ((*uncompressed_size) == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return true; } std::vector<unsigned char> tmpBuf(*uncompressed_size); #if TINYEXR_USE_MINIZ int ret = miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (miniz::MZ_OK != ret) { return false; } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { return false; } #endif // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfZipCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size); while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. 
{ const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (*uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + (*uncompressed_size); for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } // RLE code from OpenEXR -------------------------------------- #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wsign-conversion" #endif #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4204) // nonstandard extension used : non-constant // aggregate initializer (also supported by GNU // C and C99, so no big deal) #pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4267) // 'argument': conversion from '__int64' to // 'int', possible loss of data #pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is // deprecated. Instead, use the ISO C and C++ // conformant name: _strdup. #endif const int MIN_RUN_LENGTH = 3; const int MAX_RUN_LENGTH = 127; // // Compress an array of bytes, using run-length encoding, // and return the length of the compressed data. 
// static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressable run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the oncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength. 
// static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; if (0 > (maxLength -= count)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. 
// (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static void DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); return; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); assert(ret == static_cast<int>(uncompressed_size)); (void)ret; // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // 
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Per-channel bookkeeping used by the PIZ codec: the [start, end) range of
// the channel's samples inside the shared unsigned-short buffer, its
// dimensions, y-sampling rate, and per-pixel sample count.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------

// Forward Haar step without modulo arithmetic.  Gives the best compression
// ratios when the transformed data are Huffman-encoded, but only works for
// 14-bit data (untransformed values must be below (1 << 14)).
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short sa = static_cast<short>(a);
  short sb = static_cast<short>(b);

  // Rounded-down average and signed difference.
  short avg = static_cast<short>((sa + sb) >> 1);
  short diff = static_cast<short>(sa - sb);

  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff);
}

// Inverse of wenc14(): reconstructs the original pair (a, b) from the
// average l and the difference h.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  short sl = static_cast<short>(l);
  short sh = static_cast<short>(h);

  int diff = sh;
  // Undo the rounding applied by the forward transform.
  int first = sl + (diff & 1) + (diff >> 1);

  short sa = static_cast<short>(first);
  short sb = static_cast<short>(first - diff);

  a = static_cast<unsigned short>(sa);
  b = static_cast<unsigned short>(sb);
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

// Parameters for the modulo-arithmetic (full 16-bit) wavelet variant.
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward Haar step with modulo arithmetic; safe for full 16-bit values.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16().
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding: transform the nx-by-ny array `in` in place.
// The 14-bit basis (wenc14) is used when the maximum value mx allows it;
// otherwise the modulo-arithmetic 16-bit basis (wenc16) is used.
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 << level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding: transform the 2x2 quad
        // (px, p01 / p10, p11) horizontally then vertically.
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding: exact inverse of wav2Encode(), run from the
// coarsest level down to the finest.
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. 
// for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) // // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. 
// std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. 
// std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. // hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. 
// hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode > ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } 
      // hufUnpackEncTable() continues from the previous chunk:
      // a long zero-run; its length is read from the next 8 bits.
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      // Reject runs that would write past the end of the code table.
      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero-run: the run length is encoded in the code value itself.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  // Explicit per-field reset; the commented-out memset below is the
  // equivalent bulk form (kept as documentation of intent).
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
// - short codes (<= HUF_DECBITS) are resolved with a single table access;
// - long code entry allocations are not optimized, because long codes are
//   unfrequent;
// - decoding tables are used by hufDecode();
//
// Returns false (instead of throwing, as OpenEXR does) on a malformed
// encoding table.
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  // o: (allocated by caller)
                                              //    decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      // Grow the secondary entry list by one and append this symbol index.
      pl->lit++;

      if (pl->p) {
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

// Emit the Huffman code for one symbol (`code` packs bit pattern + length).
inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

// Emit a run of (runCount + 1) instances of the symbol coded by sCode,
// either explicitly or via the run-length escape code, whichever is shorter.
inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCount.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // NOTE: ">= 0" intentionally emits the symbol runCount + 1 times,
    // matching the run branch above (one symbol + runCount repeats).
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode  // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 // o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)
  int s = in[0];    // current symbol being run-length counted
  int cs = 0;       // count of repeats of s seen so far (capped at 255)

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  // Flush any bits still pending in c, MSB-aligned into the final byte.
  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//

// Read one byte from `in` into the bit accumulator c (lc = valid bit count).
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }

#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
  {                                              \
    if (po == rlc) {                             \
      if (lc < 8) getChar(c, lc, in);            \
                                                 \
      lc -= 8;                                   \
                                                 \
      unsigned char cs = (c >> lc);              \
                                                 \
      if (out + cs > oe) return false;           \
                                                 \
      /* TinyEXR issue 78 */                     \
      unsigned short s = out[-1];                \
                                                 \
      while (cs-- > 0) *out++ = s;               \
    } else if (out < oe) {                       \
      *out++ = po;                               \
    } else {                                     \
      return false;                              \
    }                                            \
  }
#else
// Emit one decoded symbol `po`. If `po` equals the run-length escape code
// `rlc`, the next 8 input bits give a repeat count and the previously
// written output value is repeated that many times. Returns false on
// truncated input or if the output range [ob, oe) would be exceeded.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      if ((in + 1) >= in_end) {
        return false;
      }

      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);

    if (out + cs > oe) return false;

    // Bounds check for safety: a run needs at least one previously
    // written value to repeat (out[-1] below).
    if ((out - 1) <= ob) return false;
    unsigned short s = out[-1];

    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif

//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//

static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in bytes)
                      unsigned short *out)  // o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //

  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //

    while (lc >= HUF_DECBITS) {
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //

        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //

        int j;

        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //

              lc -= l;
              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }
              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes
  //

  // (8 - ni) & 7 == number of padding bits in the last input byte.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}

// Count symbol occurrences of `data` into freq (sized HUF_ENCSIZE).
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;

  for (int i = 0; i < n; ++i) ++freq[data[i]];
}

// Store a 32-bit unsigned integer as 4 little-endian bytes.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}

// Load a 32-bit unsigned integer from 4 little-endian bytes.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;

  return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
         ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}

//
// EXTERNAL INTERFACE
//

// Huffman-compress nRaw 16-bit values into `compressed`.
// Output layout: 20-byte header (im, iM, tableLength, nBits, reserved),
// then the packed encoding table, then the encoded bit stream.
// Returns the total compressed size in bytes (0 when nRaw == 0).
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  std::vector<long long> freq(HUF_ENCSIZE);

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
  hufBuildEncTable(freq.data(), &im, &iM);

  char *tableStart = compressed + 20;
  char *tableEnd = tableStart;
  hufPackEncTable(freq.data(), im, iM, &tableEnd);
  int tableLength = tableEnd - tableStart;

  char *dataStart = tableEnd;
  // iM (the max symbol index) doubles as the run-length escape code.
  int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart);
  int data_length = (nBits + 7) / 8;

  writeUInt(compressed, im);
  writeUInt(compressed + 4, iM);
  writeUInt(compressed + 8, tableLength);
  writeUInt(compressed + 12, nBits);
  writeUInt(compressed + 16, 0);  // room for future extensions

  return dataStart + data_length - compressed;
}

// Huffman-uncompress `compressed` (layout produced by hufCompress) into *raw.
// raw->size() must already hold the expected number of output values.
static bool hufUncompress(const char compressed[], int nCompressed,
                          std::vector<unsigned short> *raw) {
  if (nCompressed == 0) {
    // NOTE(review): both branches return false here, so empty input is
    // always rejected regardless of raw->size() — confirm that is intended.
    if (raw->size() != 0) return false;

    return false;
  }

  int im = readUInt(compressed);
  int iM = readUInt(compressed + 4);
  // int tableLength = readUInt (compressed + 8);
  int nBits = readUInt(compressed + 12);

  if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false;

  const char *ptr = compressed + 20;

  //
  // Fast decoder needs at least 2x64-bits of compressed data, and
  // needs to be run-able on this platform. Otherwise, fall back
  // to the original decoder
  //

  // if (FastHufDecoder::enabled() && nBits > 128)
  //{
  //  FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM);
  //  fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw);
  //}
  // else
  {
    std::vector<long long> freq(HUF_ENCSIZE);
    std::vector<HufDec> hdec(HUF_DECSIZE);

    hufClearDecTable(&hdec.at(0));

    hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM,
                      &freq.at(0));

    {
      // Reject streams that claim more bits than the buffer holds.
      if (nBits > 8 * (nCompressed - (ptr - compressed))) {
        return false;
      }

      // NOTE(review): the return values of hufBuildDecTable() and
      // hufDecode() are ignored here, so a malformed stream still yields
      // `true` — confirm against callers before tightening.
      hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0));
      hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(),
                raw->data());
    }
    // catch (...)
    //{
    //  hufFreeDecTable (hdec);
    //  throw;
    //}

    hufFreeDecTable(&hdec.at(0));
  }

  return true;
}

//
// Functions to compress the range of values in the pixel data
//

const int USHORT_RANGE = (1 << 16);
const int BITMAP_SIZE = (USHORT_RANGE >> 3);

// Build a bit-per-value presence bitmap from `data`; report the first and
// last non-zero bitmap byte indices via minNonZero / maxNonZero.
static void bitmapFromData(const unsigned short data[/*nData*/], int nData,
                           unsigned char bitmap[BITMAP_SIZE],
                           unsigned short &minNonZero,
                           unsigned short &maxNonZero) {
  for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0;

  for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7));

  bitmap[0] &= ~1;  // zero is not explicitly stored in
                    // the bitmap; we assume that the
                    // data always contain zeroes

  minNonZero = BITMAP_SIZE - 1;
  maxNonZero = 0;

  for (int i = 0; i < BITMAP_SIZE; ++i) {
    if (bitmap[i]) {
      if (minNonZero > i) minNonZero = i;
      if (maxNonZero < i) maxNonZero = i;
    }
  }
}

// Build the forward lookup table (pixel value -> compact index) from the
// bitmap. Value 0 always maps to index 0.
static unsigned short forwardLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
      lut[i] = k++;
    else
      lut[i] = 0;
  }

  return k - 1;  // maximum value stored in lut[],
}                // i.e. number of ones in bitmap minus 1

// Build the reverse lookup table (compact index -> pixel value) from the
// bitmap; unused trailing entries are zero-filled.
static unsigned short reverseLutFromBitmap(
    const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) {
  int k = 0;

  for (int i = 0; i < USHORT_RANGE; ++i) {
    if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i;
  }

  int n = k - 1;

  while (k < USHORT_RANGE) lut[k++] = 0;

  return n;  // maximum k where lut[k] is non-zero,
}            // i.e.
// (cont.) number of ones in bitmap minus 1

// Remap every element of `data` in place through `lut`.
static void applyLut(const unsigned short lut[USHORT_RANGE],
                     unsigned short data[/*nData*/], int nData) {
  for (int i = 0; i < nData; ++i) data[i] = lut[data[i]];
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif  // __clang__

#ifdef _MSC_VER
#pragma warning(pop)
#endif

// PIZ-compress `inPtr` (inSize bytes of interleaved scanline pixel data,
// data_width x num_lines, channels described by channelInfo) into outPtr;
// *outSize receives the output size. Pipeline: deinterleave per channel ->
// range-compact via LUT -> wavelet encode -> Huffman encode. Stores the
// data uncompressed when compression would grow it (Issue 40).
static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize,
                        const unsigned char *inPtr, size_t inSize,
                        const std::vector<ChannelInfo> &channelInfo,
                        int data_width, int num_lines) {
  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  // Assume `inSize` is multiple of 2 or 4.
  std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short));

  std::vector<PIZChannelData> channelData(channelInfo.size());
  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  // Carve tmpBuffer into one contiguous region per channel.
  for (size_t c = 0; c < channelData.size(); c++) {
    PIZChannelData &cd = channelData[c];

    cd.start = tmpBufferEnd;
    cd.end = cd.start;

    cd.nx = data_width;
    cd.ny = num_lines;
    // cd.ys = c.channel().ySampling;

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    // size = number of 16-bit words per pixel for this channel.
    cd.size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += cd.nx * cd.ny * cd.size;
  }

  // Deinterleave scanlines into the per-channel regions.
  const unsigned char *ptr = inPtr;
  for (int y = 0; y < num_lines; ++y) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //  continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(cd.end, ptr, n * sizeof(unsigned short));
      ptr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()),
                 bitmap.data(), minNonZero, maxNonZero);

  std::vector<unsigned short> lut(USHORT_RANGE);
  unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data());
  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()));

  //
  // Store range compression info in _outBuffer
  //

  char *buf = reinterpret_cast<char *>(outPtr);

  memcpy(buf, &minNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);
  memcpy(buf, &maxNonZero, sizeof(unsigned short));
  buf += sizeof(unsigned short);

  if (minNonZero <= maxNonZero) {
    // Only the non-zero span of the bitmap is stored.
    memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero),
           maxNonZero - minNonZero + 1);
    buf += maxNonZero - minNonZero + 1;
  }

  //
  // Apply wavelet encoding
  //

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Apply Huffman encoding; append the result to _outBuffer
  //

  // length header(4byte), then huff data. Initialize length header with zero,
  // then later fill it by `length`.
  char *lengthPtr = buf;
  int zero = 0;
  memcpy(buf, &zero, sizeof(int));
  buf += sizeof(int);

  int length =
      hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf);
  memcpy(lengthPtr, &length, sizeof(int));

  (*outSize) = static_cast<unsigned int>(
      (reinterpret_cast<unsigned char *>(buf) - outPtr) +
      static_cast<unsigned int>(length));

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if ((*outSize) >= inSize) {
    (*outSize) = static_cast<unsigned int>(inSize);
    memcpy(outPtr, inPtr, inSize);
  }

  return true;
}

// PIZ-decompress `inPtr` (inLen bytes) into outPtr (tmpBufSize bytes
// expected). Inverse of CompressPiz: Huffman decode -> wavelet decode ->
// LUT expand -> re-interleave. inLen == tmpBufSize signals the
// uncompressed fallback (Issue 40).
static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr,
                          size_t tmpBufSize, size_t inLen, int num_channels,
                          const EXRChannelInfo *channels, int data_width,
                          int num_lines) {
  if (inLen == tmpBufSize) {
    // Data is not compressed(Issue 40).
    memcpy(outPtr, inPtr, inLen);
    return true;
  }

  std::vector<unsigned char> bitmap(BITMAP_SIZE);
  unsigned short minNonZero;
  unsigned short maxNonZero;

#if !MINIZ_LITTLE_ENDIAN
  // @todo { PIZ compression on BigEndian architecture. }
  assert(0);
  return false;
#endif

  memset(bitmap.data(), 0, BITMAP_SIZE);

  const unsigned char *ptr = inPtr;
  // cpy2/cpy4 avoid unaligned loads on strict-alignment targets.
  // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr));
  tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr));
  // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2));
  tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2));
  ptr += 4;

  if (maxNonZero >= BITMAP_SIZE) {
    return false;
  }

  if (minNonZero <= maxNonZero) {
    memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr,
           maxNonZero - minNonZero + 1);
    ptr += maxNonZero - minNonZero + 1;
  }

  std::vector<unsigned short> lut(USHORT_RANGE);
  memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE);
  unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data());

  //
  // Huffman decoding
  //

  int length;
  // length = *(reinterpret_cast<const int *>(ptr));
  tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr));
  ptr += sizeof(int);

  std::vector<unsigned short> tmpBuffer(tmpBufSize);
  // NOTE(review): return value of hufUncompress() is ignored — a corrupt
  // stream silently decodes to partially-written data. Confirm intent.
  hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer);

  //
  // Wavelet decoding
  //

  std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels));

  unsigned short *tmpBufferEnd = &tmpBuffer.at(0);

  for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) {
    const EXRChannelInfo &chan = channels[i];

    size_t pixelSize = sizeof(int);  // UINT and FLOAT
    if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) {
      pixelSize = sizeof(short);
    }

    channelData[i].start = tmpBufferEnd;
    channelData[i].end = channelData[i].start;
    channelData[i].nx = data_width;
    channelData[i].ny = num_lines;
    // channelData[i].ys = 1;
    channelData[i].size = static_cast<int>(pixelSize / sizeof(short));

    tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size;
  }

  for (size_t i = 0; i < channelData.size(); ++i) {
    PIZChannelData &cd = channelData[i];

    for (int j = 0; j < cd.size; ++j) {
      wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size,
                 maxValue);
    }
  }

  //
  // Expand the pixel data to their original range
  //

  applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize));

  // Re-interleave per-channel buffers back into scanline order.
  for (int y = 0; y < num_lines; y++) {
    for (size_t i = 0; i < channelData.size(); ++i) {
      PIZChannelData &cd = channelData[i];

      // if (modp (y, cd.ys) != 0)
      //  continue;

      size_t n = static_cast<size_t>(cd.nx * cd.size);
      memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short)));
      outPtr += n * sizeof(unsigned short);
      cd.end += n;
    }
  }

  return true;
}

#endif  // TINYEXR_USE_PIZ

#if TINYEXR_USE_ZFP
// Parameters controlling ZFP (de)compression, read from EXR attributes.
struct ZFPCompressionParam {
  double rate;
  int precision;
  double tolerance;
  int type;  // TINYEXR_ZFP_COMPRESSIONTYPE_*

  ZFPCompressionParam() {
    type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE;
    rate = 2.0;
    precision = 0;
    tolerance = 0.0f;
  }
};

// Extract ZFP compression parameters from the EXR attribute list.
// Returns false when "zfpCompressionType" (or the value attribute matching
// that type) is missing.
bool FindZFPCompressionParam(ZFPCompressionParam *param,
                             const EXRAttribute *attributes,
                             int num_attributes) {
  bool foundType = false;

  for (int i = 0; i < num_attributes; i++) {
    if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) &&
        (attributes[i].size == 1)) {
      param->type = static_cast<int>(attributes[i].value[0]);
      foundType = true;
    }
  }

  if (!foundType) {
    return false;
  }

  if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) &&
          (attributes[i].size == 8)) {
        param->rate = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) &&
          (attributes[i].size == 4)) {
        // NOTE(review): stores the precision value into `rate`, not
        // `precision` — looks like a copy/paste slip; `precision` keeps its
        // default of 0 which is later passed to zfp_stream_set_precision().
        // Confirm before changing.
        param->rate = *(reinterpret_cast<int *>(attributes[i].value));
        return true;
      }
    }
  } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    for (int i = 0; i < num_attributes; i++) {
      if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) &&
          (attributes[i].size == 8)) {
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
// ZFP-decompress `src` into dst (dst_width x dst_num_lines x num_channels
// floats). Width and height must be multiples of 4 (ZFP works on 4x4
// blocks).
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  // NOTE(review): this compares an element count against a byte count, and
  // the uncompressed case falls through to ZFP decoding instead of
  // returning — compare with the Issue 40 handling in DecompressPiz.
  size_t uncompressed_size = dst_width * dst_num_lines * num_channels;

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
// ZFP-compress `inPtr` into *outBuf; *outSize receives the compressed size.
// Width and height must be multiples of 4 (ZFP 4x4 blocks).
// (Function body continues past this chunk.)
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines, int num_channels,
                 const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, width, num_lines * num_channels);

  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  outBuf->resize(buf_size);

  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_field_free(field);

  size_t image_size = width * num_lines;

  for (int c = 0; c < num_channels; c++) {
    // compress 4x4 pixel block.
for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // TODO(syoyo): Refactor function arguments. static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 
0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... 
for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t 
u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... 
for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t 
u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < 
static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
              return false;
            }

            unsigned int val;
            tinyexr::cpy4(&val, line_ptr + u);

            tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

            outLine[u] = val;
          }
        }
      }
    }
  }

  return true;
}

// Decode one tile's worth of pixel data by delegating to DecodePixelData().
// `width` and `height` are output parameters: they receive the tile's
// *actual* dimensions, which are smaller than (tile_size_x, tile_size_y)
// for tiles clipped by the right/bottom edge of the data window.
// NOTE(review): the asserts below are the only guard against out-of-range
// tile offsets; with NDEBUG a corrupt offset would propagate — verify that
// callers validate tile coordinates before calling this.
static void DecodeTiledPixelData(
    unsigned char **out_images, int *width, int *height,
    const int *requested_pixel_types, const unsigned char *data_ptr,
    size_t data_len, int compression_type, int line_order, int data_width,
    int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x,
    int tile_size_y, size_t pixel_data_size, size_t num_attributes,
    const EXRAttribute *attributes, size_t num_channels,
    const EXRChannelInfo *channels,
    const std::vector<size_t> &channel_offset_list) {
  assert(tile_offset_x * tile_size_x < data_width);
  assert(tile_offset_y * tile_size_y < data_height);

  // Compute actual image size in a tile.
  // Edge tiles are clipped to the data window; interior tiles use the full
  // tile size.
  if ((tile_offset_x + 1) * tile_size_x >= data_width) {
    (*width) = data_width - (tile_offset_x * tile_size_x);
  } else {
    (*width) = tile_size_x;
  }
  if ((tile_offset_y + 1) * tile_size_y >= data_height) {
    (*height) = data_height - (tile_offset_y * tile_size_y);
  } else {
    (*height) = tile_size_y;
  }

  // Image size = tile size.
  DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len,
                  compression_type, line_order, (*width), tile_size_y,
                  /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0,
                  (*height), pixel_data_size, num_attributes, attributes,
                  num_channels, channels, channel_offset_list);
}

// Compute the byte offset of each channel within one interleaved pixel, and
// the total per-pixel size in bytes across all channels.
// On return, (*channel_offset_list)[c] is the offset of channel c and
// (*pixel_data_size) == (*channel_offset) == sum of all channel sizes.
// Returns false if any channel has an unrecognized pixel type.
static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list,
                                 int *pixel_data_size, size_t *channel_offset,
                                 int num_channels,
                                 const EXRChannelInfo *channels) {
  channel_offset_list->resize(static_cast<size_t>(num_channels));

  (*pixel_data_size) = 0;
  (*channel_offset) = 0;

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    // Offset of channel c is the running total of all preceding channels.
    (*channel_offset_list)[c] = (*channel_offset);

    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      (*pixel_data_size) += sizeof(unsigned short);
      (*channel_offset) += sizeof(unsigned short);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) {
      (*pixel_data_size) += sizeof(float);
      (*channel_offset) += sizeof(float);
    } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) {
      (*pixel_data_size) += sizeof(unsigned int);
      (*channel_offset) += sizeof(unsigned int);
    } else {
      // Unknown pixel type; layout cannot be computed.
      return false;
    }
  }
  return true;
}

// Allocate one image buffer per channel, each data_width * data_height
// pixels. A HALF channel is allocated as HALF or FLOAT storage depending on
// the corresponding entry of `requested_pixel_types`. Caller owns the
// returned array and each buffer (freed elsewhere with free()).
// NOTE(review): malloc results are not checked here; an allocation failure
// would surface as a NULL dereference downstream — confirm callers tolerate
// this or harden.
static unsigned char **AllocateImage(int num_channels,
                                     const EXRChannelInfo *channels,
                                     const int *requested_pixel_types,
                                     int data_width, int data_height) {
  unsigned char **images =
      reinterpret_cast<unsigned char **>(static_cast<float **>(
          malloc(sizeof(float *) * static_cast<size_t>(num_channels))));

  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
    size_t data_len =
        static_cast<size_t>(data_width) * static_cast<size_t>(data_height);
    if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) {
      // pixel_data_size += sizeof(unsigned short);
      // channel_offset += sizeof(unsigned short);

      // Alloc internal image for half type.
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), 
sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); 
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = 
true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." 
<< std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy poiner exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, 
std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. if (exr_header->tiled) { size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { // Allocate memory for each tile. 
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { if (err) { (*err) += "Insufficient data size.\n"; } return TINYEXR_ERROR_INVALID_DATA; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } if (tile_coordinates[3] != 0) { return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { if (err) { (*err) += "Insufficient data length.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, 
static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; exr_image->num_tiles = static_cast<int>(num_tiles); } } else { // scanline format exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #ifdef _OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else { int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; // assert(num_lines > 0); if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y line_no -= exr_header->data_window[1]; if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), 
static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } } // omp parallel } if (invalid_data) { return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. { for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *head, const unsigned char *marker, const size_t size, const char **err) { if (exr_image == NULL || exr_header == NULL || head == NULL || marker == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == 
TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0]; if (data_width >= std::numeric_limits<int>::max()) { // Issue 63 tinyexr::SetErrorMessage("Invalid data window value", err); return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data window or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", err); 
return TINYEXR_ERROR_INVALID_DATA;
    }
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);

    // An offset-table entry must point inside the file buffer.
    if (offset >= size) {
      tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //   stringstream ss;
      //   ss << "Incomplete lineOffsets." << std::endl;
      //   (*err) += ss.str();
      //}
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK. Reconstruction repairs the whole table, so one success
        // covers all remaining entries.
        break;
      } else {
        tinyexr::SetErrorMessage(
            "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }
        free(exr_image->images);
        exr_image->images = NULL;
      }
    }

    return ret;
  }
}
}  // namespace tinyexr

// Loads an EXR file and converts it to a packed RGBA float buffer.
// On success `*out_rgba` points to a malloc()ed array of
// 4 * width * height floats that the caller must free().
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; on failure `*err`
// (when non-NULL) receives a malloc()ed message (free with FreeEXRErrorMessage
// or free(), per the library convention).
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      FreeEXRHeader(&exr_header);
      return ret;
    }
  }

  // RGBA: locate the R/G/B/A channel indices by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  // NOTE(review): this branch triggers only when "A" is channel 0 and no
  // R/G/B channels exist — presumably a single-channel alpha image.
  // Verify whether `idxA != -1` was intended for multi-channel files.
  if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
    // Alpha channel only.

    if (exr_header.tiled) {
      // todo.implement this
    }
    // NOTE(review): malloc result is not checked before use.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));

    // Replicate the single channel into all four RGBA slots.
    for (int i = 0; i < exr_image.width * exr_image.height; i++) {
      const float val = reinterpret_cast<float **>(exr_image.images)[0][i];
      (*out_rgba)[4 * i + 0] = val;
      (*out_rgba)[4 * i + 1] = val;
      (*out_rgba)[4 * i + 2] = val;
      (*out_rgba)[4 * i + 3] = val;
    }
  } else {
    // Assume RGB(A)

    if (idxR == -1) {
      tinyexr::SetErrorMessage("R channel not found", err);
      // @todo { free exr_image }
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxG == -1) {
      tinyexr::SetErrorMessage("G channel not found", err);
      // @todo { free exr_image }
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    if (idxB == -1) {
      tinyexr::SetErrorMessage("B channel not found", err);
      // @todo { free exr_image }
      FreeEXRHeader(&exr_header);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    // NOTE(review): malloc result is not checked before use.
    (*out_rgba) = reinterpret_cast<float *>(
        malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) *
               static_cast<size_t>(exr_image.height)));
    if (exr_header.tiled) {
      // Gather pixels tile by tile into the packed RGBA output.
      for (int it = 0; it < exr_image.num_tiles; it++) {
        for (int j = 0; j < exr_header.tile_size_y; j++)
          for (int i = 0; i < exr_header.tile_size_x; i++) {
            const int ii =
                exr_image.tiles[it].offset_x * exr_header.tile_size_x + i;
            const int jj =
                exr_image.tiles[it].offset_y * exr_header.tile_size_y + j;
            const int idx = ii + jj * exr_image.width;

            // out of region check.
            if (ii >= exr_image.width) {
              continue;
            }
            if (jj >= exr_image.height) {
              continue;
            }
            const int srcIdx = i + j * exr_header.tile_size_x;
            unsigned char **src = exr_image.tiles[it].images;
            (*out_rgba)[4 * idx + 0] =
                reinterpret_cast<float **>(src)[idxR][srcIdx];
            (*out_rgba)[4 * idx + 1] =
                reinterpret_cast<float **>(src)[idxG][srcIdx];
            (*out_rgba)[4 * idx + 2] =
                reinterpret_cast<float **>(src)[idxB][srcIdx];
            if (idxA != -1) {
              (*out_rgba)[4 * idx + 3] =
                  reinterpret_cast<float **>(src)[idxA][srcIdx];
            } else {
              // Missing alpha: write opaque.
              (*out_rgba)[4 * idx + 3] = 1.0;
            }
          }
      }
    } else {
      // Scanline image: channels are already whole-image planes.
      for (int i = 0; i < exr_image.width * exr_image.height; i++) {
        (*out_rgba)[4 * i + 0] =
            reinterpret_cast<float **>(exr_image.images)[idxR][i];
        (*out_rgba)[4 * i + 1] =
            reinterpret_cast<float **>(exr_image.images)[idxG][i];
        (*out_rgba)[4 * i + 2] =
            reinterpret_cast<float **>(exr_image.images)[idxB][i];
        if (idxA != -1) {
          (*out_rgba)[4 * i + 3] =
              reinterpret_cast<float **>(exr_image.images)[idxA][i];
        } else {
          // Missing alpha: write opaque.
          (*out_rgba)[4 * i + 3] = 1.0;
        }
      }
    }
  }

  (*width) = exr_image.width;
  (*height) = exr_image.height;

  FreeEXRHeader(&exr_header);
  FreeEXRImage(&exr_image);

  return TINYEXR_SUCCESS;
}

// Parses an EXR header from an in-memory buffer into `exr_header`.
// `version` must already be parsed (e.g. via ParseEXRVersionFromMemory).
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version,
                             const unsigned char *memory, size_t size,
                             const char **err) {
  if (memory == NULL || exr_header == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument. `memory` or `exr_header` argument is null in "
        "ParseEXRHeaderFromMemory()",
        err);

    // Invalid argument
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Insufficient header/data size.\n", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Skip the magic number + version block; the attribute list follows it.
  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  tinyexr::HeaderInfo info;
  info.clear();

  std::string err_str;
  int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size);

  if (ret != TINYEXR_SUCCESS) {
    if (err && !err_str.empty()) {
      tinyexr::SetErrorMessage(err_str, err);
    }
  }

  ConvertHeader(exr_header, info);

  // transfer `tiled` from version.
  exr_header->tiled = version->tiled;

  return ret;
}

// Loads an EXR image from an in-memory buffer and converts it to a packed
// RGBA float buffer (same contract as LoadEXR, but reads from `memory`).
int LoadEXRFromMemory(float **out_rgba, int *width, int *height,
                      const unsigned char *memory, size_t size,
                      const char **err) {
  if (out_rgba == NULL || memory == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;

  InitEXRHeader(&exr_header);

  int ret = ParseEXRVersionFromMemory(&exr_version, memory, size);
  if (ret != TINYEXR_SUCCESS) {
    tinyexr::SetErrorMessage("Failed to parse EXR version", err);
    return ret;
  }

  ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err);
  if (ret != TINYEXR_SUCCESS) {
    return ret;
  }

  // Read HALF channel as FLOAT.
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, 
filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); 
}

  // Version, scanline.
  {
    char marker[] = {2, 0, 0, 0};
    /* @todo
    if (exr_header->tiled) { marker[1] |= 0x2; }
    if (exr_header->long_name) { marker[1] |= 0x4; }
    if (exr_header->non_image) { marker[1] |= 0x8; }
    if (exr_header->multipart) { marker[1] |= 0x10; }
    */
    memory.insert(memory.end(), marker, marker + 4);
  }

  // Number of scanlines per compressed block depends on the compressor.
  int num_scanlines = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanlines = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanlines = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanlines = 16;
  }

  // Write attributes.
  std::vector<tinyexr::ChannelInfo> channels;
  {
    std::vector<unsigned char> data;

    for (int c = 0; c < exr_header->num_channels; c++) {
      tinyexr::ChannelInfo info;
      info.p_linear = 0;
      info.pixel_type = exr_header->requested_pixel_types[c];
      info.x_sampling = 1;
      info.y_sampling = 1;
      info.name = std::string(exr_header->channels[c].name);
      channels.push_back(info);
    }

    tinyexr::WriteChannelInfo(data, channels);

    tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0),
                                    static_cast<int>(data.size()));
  }

  {
    int comp = exr_header->compression_type;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp));
    tinyexr::WriteAttributeToMemory(
        &memory, "compression", "compression",
        reinterpret_cast<const unsigned char *>(&comp), 1);
  }

  {
    // dataWindow == displayWindow == full image extent.
    int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3]));
    tinyexr::WriteAttributeToMemory(
        &memory, "dataWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
    tinyexr::WriteAttributeToMemory(
        &memory, "displayWindow", "box2i",
        reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4);
  }

  {
    unsigned char line_order = 0;  // @fixme { read line_order from EXRHeader }
    tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder",
                                    &line_order, 1);
  }

  {
    float aspectRatio = 1.0f;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio));
    tinyexr::WriteAttributeToMemory(
        &memory, "pixelAspectRatio", "float",
        reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float));
  }

  {
    float center[2] = {0.0f, 0.0f};
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0]));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1]));
    tinyexr::WriteAttributeToMemory(
        &memory, "screenWindowCenter", "v2f",
        reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float));
  }

  {
    float w = static_cast<float>(exr_image->width);
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
    tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float",
                                    reinterpret_cast<const unsigned char *>(&w),
                                    sizeof(float));
  }

  // Custom attributes
  if (exr_header->num_custom_attributes > 0) {
    for (int i = 0; i < exr_header->num_custom_attributes; i++) {
      tinyexr::WriteAttributeToMemory(
          &memory, exr_header->custom_attributes[i].name,
          exr_header->custom_attributes[i].type,
          reinterpret_cast<const unsigned char *>(
              exr_header->custom_attributes[i].value),
          exr_header->custom_attributes[i].size);
    }
  }

  {  // end of header
    unsigned char e = 0;
    memory.push_back(e);
  }

  int num_blocks = exr_image->height / num_scanlines;
  if (num_blocks * num_scanlines < exr_image->height) {
    num_blocks++;
  }
  std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks));

  size_t headerSize = memory.size();
  tinyexr::tinyexr_uint64 offset =
      headerSize +
      static_cast<size_t>(num_blocks) *
          sizeof(
              tinyexr::tinyexr_int64);  // sizeof(header) + sizeof(offsetTable)

  std::vector<std::vector<unsigned char> > data_list(
      static_cast<size_t>(num_blocks));
  std::vector<size_t> channel_offset_list(
      static_cast<size_t>(exr_header->num_channels));

  // Compute per-pixel byte size and per-channel byte offsets within a
  // scanline, based on the *requested* (output) pixel types.
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    channel_offset_list[c] = channel_offset;
    if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
      pixel_data_size += sizeof(unsigned short);
      channel_offset += sizeof(unsigned short);
    } else if (exr_header->requested_pixel_types[c] ==
               TINYEXR_PIXELTYPE_FLOAT) {
      pixel_data_size += sizeof(float);
      channel_offset += sizeof(float);
    } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
      pixel_data_size += sizeof(unsigned int);
      channel_offset += sizeof(unsigned int);
    } else {
      assert(0);
    }
  }

#if TINYEXR_USE_ZFP
  tinyexr::ZFPCompressionParam zfp_compression_param;

  // Use ZFP compression parameter from custom attributes(if such a parameter
  // exists)
  {
    bool ret = tinyexr::FindZFPCompressionParam(
        &zfp_compression_param, exr_header->custom_attributes,
        exr_header->num_custom_attributes);

    if (!ret) {
      // Use predefined compression parameter.
      zfp_compression_param.type = 0;
      zfp_compression_param.rate = 2;
    }
  }
#endif

  // Use signed int since some OpenMP compiler doesn't allow unsigned type for
  // `parallel for`
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < num_blocks; i++) {
    size_t ii = static_cast<size_t>(i);
    int start_y = num_scanlines * i;
    int endY = (std::min)(num_scanlines * (i + 1), exr_image->height);
    int h = endY - start_y;

    // Interleave this block's scanlines (channel-major within a scanline)
    // into `buf`, converting from the stored to the requested pixel type
    // and byte-swapping to little-endian on the way.
    std::vector<unsigned char> buf(
        static_cast<size_t>(exr_image->width * h * pixel_data_size));

    for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels);
         c++) {
      if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            float *line_ptr = reinterpret_cast<float *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP16 h16;
              h16.u = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP32 f32 = half_to_float(h16);

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f));

              // line_ptr[x] = f32.f;
              tinyexr::cpy4(line_ptr + x, &(f32.f));
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_HALF) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                &buf.at(static_cast<size_t>(pixel_data_size * y *
                                            exr_image->width) +
                        channel_offset_list[c] *
                            static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              unsigned short val = reinterpret_cast<unsigned short **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap2(&val);

              // line_ptr[x] = val;
              tinyexr::cpy2(line_ptr + x, &val);
            }
          }
        } else {
          assert(0);
        }
      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) {
        if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            unsigned short *line_ptr = reinterpret_cast<unsigned short *>(
                &buf.at(static_cast<size_t>(pixel_data_size * y *
                                            exr_image->width) +
                        channel_offset_list[c] *
                            static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              tinyexr::FP32 f32;
              f32.f = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::FP16 h16;
              h16 = float_to_half_full(f32);

              tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u));

              // line_ptr[x] = h16.u;
              tinyexr::cpy2(line_ptr + x, &(h16.u));
            }
          }
        } else if (exr_header->requested_pixel_types[c] ==
                   TINYEXR_PIXELTYPE_FLOAT) {
          for (int y = 0; y < h; y++) {
            // Assume increasing Y
            float *line_ptr = reinterpret_cast<float *>(&buf.at(
                static_cast<size_t>(pixel_data_size * y * exr_image->width) +
                channel_offset_list[c] *
                    static_cast<size_t>(exr_image->width)));
            for (int x = 0; x < exr_image->width; x++) {
              float val = reinterpret_cast<float **>(
                  exr_image->images)[c][(y + start_y) * exr_image->width + x];

              tinyexr::swap4(reinterpret_cast<unsigned int *>(&val));

              // line_ptr[x] = val;
              tinyexr::cpy4(line_ptr + x, &val);
            }
          }
        } else {
          assert(0);
        }
      } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) {
        for (int y = 0; y < h; y++) {
          // Assume increasing Y
          unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at(
              static_cast<size_t>(pixel_data_size * y * exr_image->width) +
              channel_offset_list[c] * static_cast<size_t>(exr_image->width)));
          for (int x = 0; x < exr_image->width; x++) {
            unsigned int val = reinterpret_cast<unsigned int **>(
                exr_image->images)[c][(y + start_y) * exr_image->width + x];

            tinyexr::swap4(&val);

            // line_ptr[x] = val;
            tinyexr::cpy4(line_ptr + x, &val);
          }
        }
      }
    }

    if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) {
      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(uncompressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(buf.size());
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), buf.begin(),
                           buf.begin() + data_len);

    } else if ((exr_header->compression_type ==
                TINYEXR_COMPRESSIONTYPE_ZIPS) ||
               (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#if TINYEXR_USE_MINIZ
      std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound(
          static_cast<unsigned long>(buf.size())));
#else
      std::vector<unsigned char> block(
          compressBound(static_cast<uLong>(buf.size())));
#endif
      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressZip(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) {
      // (buf.size() * 3) / 2 would be enough.
      std::vector<unsigned char> block((buf.size() * 3) / 2);

      tinyexr::tinyexr_uint64 outSize = block.size();

      tinyexr::CompressRle(&block.at(0), outSize,
                           reinterpret_cast<const unsigned char *>(&buf.at(0)),
                           static_cast<unsigned long>(buf.size()));

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = static_cast<unsigned int>(outSize);  // truncate
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);

    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
#if TINYEXR_USE_PIZ
      unsigned int bufLen =
          8192 + static_cast<unsigned int>(
                     2 * static_cast<unsigned int>(
                             buf.size()));  // @fixme { compute good bound. }
      std::vector<unsigned char> block(bufLen);
      unsigned int outSize = static_cast<unsigned int>(block.size());

      CompressPiz(&block.at(0), &outSize,
                  reinterpret_cast<const unsigned char *>(&buf.at(0)),
                  buf.size(), channels, exr_image->width, h);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);
#else
      assert(0);
#endif
    } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
#if TINYEXR_USE_ZFP
      std::vector<unsigned char> block;
      unsigned int outSize;

      tinyexr::CompressZfp(
          &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)),
          exr_image->width, h, exr_header->num_channels, zfp_compression_param);

      // 4 byte: scan line
      // 4 byte: data size
      // ~     : pixel data(compressed)
      std::vector<unsigned char> header(8);
      unsigned int data_len = outSize;
      memcpy(&header.at(0), &start_y, sizeof(int));
      memcpy(&header.at(4), &data_len, sizeof(unsigned int));

      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0)));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4)));

      data_list[ii].insert(data_list[ii].end(), header.begin(), header.end());
      data_list[ii].insert(data_list[ii].end(), block.begin(),
                           block.begin() + data_len);
#else
      assert(0);
#endif
    } else {
      assert(0);
    }
  }  // omp parallel

  // Fill the offset table now that each block's compressed size is known.
  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    offsets[i] = offset;
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i]));
    offset += data_list[i].size();
  }

  size_t totalSize = static_cast<size_t>(offset);
  {
    memory.insert(
        memory.end(), reinterpret_cast<unsigned char
*>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } if ( memory.size() == 0 ) { tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char *>(malloc(totalSize)); memcpy((*memory_out), &memory.at(0), memory.size()); unsigned char *memory_ptr = *memory_out + memory.size(); for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size()); memory_ptr += data_list[i].size(); } return totalSize; // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return 
TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh)); } else if 
(attr_name.compare("displayWindow") == 0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&x)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&y)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&h)); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { 
deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. 
{ unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * 
static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { tinyexr::FP16 f16; const unsigned short *src_ptr = reinterpret_cast<unsigned short *>( &sample_data.at(size_t(data_offset) + x * sizeof(short))); tinyexr::cpy2(&(f16.u), src_ptr); tinyexr::FP32 f32 = half_to_float(f16); deep_image->image[c][y][x] = f32.f; } data_offset += sizeof(short) * static_cast<size_t>(samples_per_line); } else { // float for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { float f; const float *src_ptr = reinterpret_cast<float *>( &sample_data.at(size_t(data_offset) + x * sizeof(float))); tinyexr::cpy4(&f, src_ptr); deep_image->image[c][y][x] = f; } data_offset += sizeof(float) * static_cast<size_t>(samples_per_line); } } } } // y deep_image->width = data_width; deep_image->height = data_height; deep_image->channel_names = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(num_channels))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { #ifdef _WIN32 deep_image->channel_names[c] = _strdup(channels[c].name.c_str()); #else deep_image->channel_names[c] = strdup(channels[c].name.c_str()); #endif } deep_image->num_channels = num_channels; return TINYEXR_SUCCESS; } void InitEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return; } exr_image->width = 0; exr_image->height = 0; exr_image->num_channels = 0; exr_image->images = NULL; exr_image->tiles = NULL; exr_image->num_tiles = 0; } void FreeEXRErrorMessage(const char 
*msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } 
size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. 
(*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. 
{ const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. 
size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list; for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> offset_table( static_cast<size_t>(exr_headers[i]->chunk_count)); for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } chunk_offset_table_list.push_back(offset_table); } // Decode image. for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { std::vector<tinyexr::tinyexr_uint64> &offset_table = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (size_t c = 0; c < offset_table.size(); c++) { const unsigned char *part_number_addr = memory + offset_table[c] - 4; // -4 to move to 'part number' field. 
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. 
header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
jacobi-block-task.c
/*
 * jacobi-block-task.c — HClib translation of the "#pragma omp task/taskwait"
 * version of the blocked Jacobi SWEEP kernel.  This is auto-generated-style
 * code: every former OpenMP task body becomes a heap-allocated capture
 * struct plus an hclib_async() callback, and every former taskwait becomes
 * an hclib_end_finish()/hclib_start_finish() pair.
 */
#include "hclib.h"
#ifdef __cplusplus
#include "hclib_cpp.h"
#include "hclib_system.h"
#ifdef __CUDACC__
#include "hclib_cuda.h"
#endif
#endif
# include "poisson.h"

/* #pragma omp task/taskwait version of SWEEP. */

/*
 * Capture context for the "save current estimate" task (former pragma at
 * line 26 of the OpenMP source).  Per-task scalars (block_x, block_y) are
 * captured by value; everything else is captured as a pointer into sweep()'s
 * stack frame, which stays live because each task phase is joined by
 * hclib_end_finish() before sweep() continues.  Note that the generator
 * captures the full firstprivate/shared list of the original pragma, so
 * several fields (it_ptr, max_blocks_*_ptr, itold_ptr, itnew_ptr, ...) are
 * never read by the callback below.
 */
typedef struct _pragma26_omp_task {
    int (*it_ptr);
    int block_x;
    int block_y;
    int (*max_blocks_x_ptr);
    int (*max_blocks_y_ptr);
    int (*nx_ptr);
    int (*ny_ptr);
    double (*dx_ptr);
    double (*dy_ptr);
    double (*(*f__ptr));
    int (*itold_ptr);
    int (*itnew_ptr);
    double (*(*u__ptr));
    double (*(*unew__ptr));
    int (*block_size_ptr);
} pragma26_omp_task;

/*
 * Capture context for the "compute new estimate" task (former pragma at
 * line 36 of the OpenMP source).  Same capture conventions as above.
 */
typedef struct _pragma36_omp_task {
    int block_x;
    int block_y;
    int (*nx_ptr);
    int (*ny_ptr);
    double (*dx_ptr);
    double (*dy_ptr);
    double (*(*f__ptr));
    double (*(*u__ptr));
    double (*(*unew__ptr));
    int (*block_size_ptr);
} pragma36_omp_task;

static void pragma26_omp_task_hclib_async(void *____arg);
static void pragma36_omp_task_hclib_async(void *____arg);

/*
 * sweep: run (itnew - itold) Jacobi iterations over an nx-by-ny grid,
 * decomposed into block_size x block_size tiles.  Each iteration spawns one
 * task per tile for the copy phase, joins, then one task per tile for the
 * update phase, and joins again, so the two phases never overlap.
 *
 * f_    : right-hand-side array        (semantics defined in poisson.h)
 * u_    : current solution estimate
 * unew_ : next solution estimate
 * block_size == 0 means "one tile spanning the whole x extent".
 *
 * NOTE(review): nx and ny are assumed to be multiples of block_size —
 * remainder tiles are silently dropped by the integer division.  Confirm
 * against the callers in poisson.h.
 */
void sweep (int nx, int ny, double dx, double dy, double *f_,
            int itold, int itnew, double *u_, double *unew_,
            int block_size)
{
    int it;
    int block_x, block_y;

    if (block_size == 0)
        block_size = nx;
    int max_blocks_x = (nx / block_size);
    int max_blocks_y = (ny / block_size);

    /* Outer finish scope: joined once after the whole iteration loop. */
    hclib_start_finish(); {
        for (it = itold + 1; it <= itnew; it++) {
            // Save the current estimate.
            for (block_x = 0; block_x < max_blocks_x; block_x++) {
                for (block_y = 0; block_y < max_blocks_y; block_y++) {
                    {
                        /* Spawn one copy task per tile.  The context is
                         * freed by the callback after it runs.
                         * NOTE(review): malloc result is not checked; the
                         * callback would dereference NULL on OOM. */
                        pragma26_omp_task *new_ctx = (pragma26_omp_task *)malloc(sizeof(pragma26_omp_task));
                        new_ctx->it_ptr = &(it);
                        new_ctx->block_x = block_x;
                        new_ctx->block_y = block_y;
                        new_ctx->max_blocks_x_ptr = &(max_blocks_x);
                        new_ctx->max_blocks_y_ptr = &(max_blocks_y);
                        new_ctx->nx_ptr = &(nx);
                        new_ctx->ny_ptr = &(ny);
                        new_ctx->dx_ptr = &(dx);
                        new_ctx->dy_ptr = &(dy);
                        new_ctx->f__ptr = &(f_);
                        new_ctx->itold_ptr = &(itold);
                        new_ctx->itnew_ptr = &(itnew);
                        new_ctx->u__ptr = &(u_);
                        new_ctx->unew__ptr = &(unew_);
                        new_ctx->block_size_ptr = &(block_size);
                        hclib_async(pragma26_omp_task_hclib_async, new_ctx, NO_FUTURE, ANY_PLACE);
                    } ;
                }
            }
            /* Former "#pragma omp taskwait": close the current finish scope
             * (blocks until all copy tasks complete), then open a new one
             * for the next phase. */
            hclib_end_finish(); hclib_start_finish(); ;

            // Compute a new estimate.
            for (block_x = 0; block_x < max_blocks_x; block_x++) {
                for (block_y = 0; block_y < max_blocks_y; block_y++) {
                    {
                        /* Spawn one update task per tile (same ownership and
                         * OOM caveat as above). */
                        pragma36_omp_task *new_ctx = (pragma36_omp_task *)malloc(sizeof(pragma36_omp_task));
                        new_ctx->block_x = block_x;
                        new_ctx->block_y = block_y;
                        new_ctx->nx_ptr = &(nx);
                        new_ctx->ny_ptr = &(ny);
                        new_ctx->dx_ptr = &(dx);
                        new_ctx->dy_ptr = &(dy);
                        new_ctx->f__ptr = &(f_);
                        new_ctx->u__ptr = &(u_);
                        new_ctx->unew__ptr = &(unew_);
                        new_ctx->block_size_ptr = &(block_size);
                        hclib_async(pragma36_omp_task_hclib_async, new_ctx, NO_FUTURE, ANY_PLACE);
                    } ;
                }
            }
            /* Second former taskwait: join the update tasks before the next
             * Jacobi iteration. */
            hclib_end_finish(); hclib_start_finish(); ;
        }
    } ; hclib_end_finish();  /* close the finish scope opened last */
}

/*
 * Task body for the copy phase: unpack the captured context, delegate the
 * tile copy to copy_block() (declared in poisson.h; presumably copies the
 * (block_x, block_y) tile between u_ and unew_ — TODO confirm direction
 * against poisson.h), then release the context.
 */
static void pragma26_omp_task_hclib_async(void *____arg) {
    pragma26_omp_task *ctx = (pragma26_omp_task *)____arg;
    int block_x; block_x = ctx->block_x;
    int block_y; block_y = ctx->block_y;
    /* Nested finish scope around the kernel call, ended non-blockingly so
     * the worker thread is not stalled. */
    hclib_start_finish();
    copy_block((*(ctx->nx_ptr)), (*(ctx->ny_ptr)), block_x, block_y, (*(ctx->u__ptr)), (*(ctx->unew__ptr)), (*(ctx->block_size_ptr))) ;
    ; hclib_end_finish_nonblocking();
    free(____arg);
}

/*
 * Task body for the update phase: unpack the captured context and delegate
 * the tile update to compute_estimate() (declared in poisson.h), then
 * release the context.
 */
static void pragma36_omp_task_hclib_async(void *____arg) {
    pragma36_omp_task *ctx = (pragma36_omp_task *)____arg;
    int block_x; block_x = ctx->block_x;
    int block_y;
    block_y = ctx->block_y;
    hclib_start_finish();
    compute_estimate(block_x, block_y, (*(ctx->u__ptr)), (*(ctx->unew__ptr)), (*(ctx->f__ptr)), (*(ctx->dx_ptr)), (*(ctx->dy_ptr)), (*(ctx->nx_ptr)), (*(ctx->ny_ptr)), (*(ctx->block_size_ptr))) ;
    ; hclib_end_finish_nonblocking();
    free(____arg);
}
idasFoodWeb_bnd_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * Based on idaFoodWeb_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2022, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDAS: Food web problem. * * This example program (OpenMP version) uses the SUNBAND linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. 
In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDAS using the SUNBAND linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idasFoodWeb_bnd_omp * To specify the number of threads at the command line, use * % ./idasFoodWeb_bnd_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. 
* ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <idas/idas.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. 
*/ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. */ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype *x1, realtype *x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* *-------------------------------------------------------------------- * MAIN PROGRAM *-------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; SUNContext ctx; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS enviroment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Create the SUNDIALS context object for this simulation */ 
retval = SUNContext_Create(NULL, &ctx); if (check_retval(&retval, "SUNContext_Create", 1)) return 1; /* Allocate and initialize user data block webdata. */ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads, ctx); webdata->acoef = SUNDlsMat_newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads, ctx); if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1); cp = N_VClone(cc); if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1); id = N_VClone(cc); if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(ctx); if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1); retval = IDASetUserData(ida_mem, webdata); if(check_retval(&retval, "IDASetUserData", 1)) return(1); retval = IDASetId(ida_mem, id); if(check_retval(&retval, "IDASetId", 1)) return(1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if(check_retval(&retval, "IDAInit", 1)) return(1); retval = IDASStolerances(ida_mem, rtol, atol); if(check_retval(&retval, "IDASStolerances", 1)) return(1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml, ctx); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); LS = SUNLinSol_Band(cc, A, ctx); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); retval = IDASetLinearSolver(ida_mem, LS, A); if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if(check_retval(&retval, "IDACalcIC", 1)) return(1); /* Print heading, basic parameters, and initial values. 
*/ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. */ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if(check_retval(&retval, "IDASolve", 1)) return(retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); SUNDlsMat_destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); SUNContext_Free(&ctx); return(0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* *-------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA *-------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. * This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. 
 */
#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads)
  for (jy = 0; jy < MY; jy++) {
    yloc = NSMX * jy;                       /* offset of row jy in the flat vector */
    for (jx = 0; jx < MX; jx++) {
      loc = yloc + NUM_SPECIES * jx;        /* offset of grid point (jx,jy)        */
      for (is = 0; is < NUM_SPECIES; is++) {
        /* Prey components (is < np) are differential: F = c' - rhs.
           Predator components are algebraic: F = -rhs. */
        if (is < np)
          resv[loc+is] = cpv[loc+is] - resv[loc+is];
        else
          resv[loc+is] = -resv[loc+is];
      }
    }
  }

  return(0);

}

/*
 *--------------------------------------------------------------------
 * PRIVATE FUNCTIONS
 *--------------------------------------------------------------------
 */

/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 * Fills mesh sizes, species counts, mesh spacings, the interaction
 * matrix acoef, the birth-rate coefficients bcoef, and the diffusion
 * coefficients cox/coy (acoef/bcoef/cox/coy are macros over webdata).
 */

static void InitUserData(UserData webdata)
{
  sunindextype i, j, np;
  realtype *a1,*a2, *a3, *a4, dx2, dy2;

  webdata->mx = MX;
  webdata->my = MY;
  webdata->ns = NUM_SPECIES;
  webdata->np = NPREY;
  webdata->dx = AX/(MX-1);
  webdata->dy = AY/(MY-1);
  webdata->Neq= NEQ;

  /* Set up the coefficients a and b, and others found in the equations. */
  np = webdata->np;
  dx2 = (webdata->dx)*(webdata->dx);
  dy2 = (webdata->dy)*(webdata->dy);

  for (i = 0; i < np; i++) {
    /* a1..a4 walk the four np x np quadrants of acoef, row by row:
       a1 = prey-vs-predator, a2 = predator-vs-prey,
       a3 = prey-vs-prey, a4 = predator-vs-predator. */
    a1 = &(acoef[i][np]);
    a2 = &(acoef[i+np][0]);
    a3 = &(acoef[i][0]);
    a4 = &(acoef[i+np][np]);

    /* Fill in the portion of acoef in the four quadrants, row by row. */
    for (j = 0; j < np; j++) {
      *a1++ = -GG;
      *a2++ = EE;
      *a3++ = ZERO;
      *a4++ = ZERO;
    }

    /* Reset the diagonal elements of acoef to -AA (self-interaction). */
    acoef[i][i] = -AA;
    acoef[i+np][i+np] = -AA;

    /* Set coefficients for b and diffusion terms. */
    bcoef[i] = BB;
    bcoef[i+np] = -BB;
    cox[i] = DPREY/dx2;
    cox[i+np] = DPRED/dx2;
    coy[i] = DPREY/dy2;
    coy[i+np] = DPRED/dy2;
  }

}

/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey and 0 for the predators.
 * The prey cp values are set according to the given system, and
 * the predator cp values are set to zero.
*/ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int) mu, (long int) ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" 
| nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. * Selected run statistics are printed. Then values of the concentrations * are printed for the bottom left and top right grid points only. */ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c,0,0); c_tr = IJ_Vptr(c,MX-1,MY-1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. 
*/ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre+nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. * This routine computes the right-hand sides of the system equations, * consisting of the diffusion term and interaction term. * The interaction term is computed by the function WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* Loop over grid points, evaluate interaction vector (length ns), form diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy ; idyu = (jy!=MY-1) ? NSMX : -NSMX; idyl = (jy!= 0 ) ? 
NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc,jx,jy); ratesxy = IJ_Vptr(webdata->rates,jx,jy); cratexy = IJ_Vptr(crate,jx,jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. */ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy+is) - *(cxy - idyl + is) ; dcyui = *(cxy + idyu + is) - *(cxy+is); /* Differencing in x. */ dcxli = *(cxy+is) - *(cxy - idxl + is); dcxui = *(cxy + idxu +is) - *(cxy+is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. * At a given (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] ); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. */ static realtype dotprod(sunindextype size, realtype *x1, realtype *x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return(temp); } /* * Check function return value... 
 * opt == 0 means SUNDIALS function allocates memory so check if
 *          returned NULL pointer
 * opt == 1 means SUNDIALS function returns an integer value so check if
 *          retval < 0
 * opt == 2 means function allocates memory so check if returned
 *          NULL pointer
 */

static int check_retval(void *returnvalue, char *funcname, int opt)
{
  int *retval;

  if (opt == 0 && returnvalue == NULL) {
    /* Check if SUNDIALS function returned NULL pointer - no memory allocated */
    fprintf(stderr,
            "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  } else if (opt == 1) {
    /* Check if retval < 0 (the SUNDIALS convention for failure codes) */
    retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
  } else if (opt == 2 && returnvalue == NULL) {
    /* Check if function returned NULL pointer - no memory allocated */
    fprintf(stderr,
            "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  return(0);
}
utils.h
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #ifndef UTILS_H #define UTILS_H #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #include <stddef.h> #include <stdint.h> #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #ifdef GRAPH_GENERATOR_MPI #include <mpi.h> #endif #ifdef GRAPH_GENERATOR_OMP #include <omp.h> #endif #include "splittable_mrg.h" #include "kronecker.h" #if defined(_OPENMP) #define OMP(x_) _Pragma(x_) #else #define OMP(x_) #endif #ifdef __cplusplus extern "C" { #endif #if defined(HAVE_LIBNUMA) #include <numa.h> static int numa_inited = 0; static int numa_avail = -1; void * xmalloc (size_t n) { void * out; if (!numa_inited) { OMP("omp critical") { numa_inited = 1; numa_avail = numa_available (); } } if (numa_avail) out = numa_alloc (sz); else out = malloc (sz); if (!out) { fprintf(stderr, "Out of memory trying to allocate %zu byte(s)\n", sz); abort (); } return out; } void * xcalloc (size_t n, size_t sz) { void * out; if (!numa_inited) { OMP("omp critical") { numa_inited = 1; numa_avail = numa_available (); } } if (numa_avail) { size_t to_alloc; to_alloc = n * sz; if (to_alloc < n || to_alloc < sz) { fprintf(stderr, "Allocation size out of range for %zu items of %zu byte(s)\n", n, sz); abort (); } out = numa_alloc (n * sz); #if defined(_OPENMP) #pragma omp parallel for for (size_t k = 0; k < n; ++k) memset (out + k * sz, 0, sz); #else memset (out, 0, n * sz); #endif } else out = calloc (n, sz); if (!out) { fprintf(stderr, "Out of memory trying to allocate/clear %zu items of %zu byte(s)\n", n, sz); abort (); } return out; } void xfree (void * p, size_t sz) { if (!p) return; if (numa_avail >= 0) numa_free (p, sz); else free (p); } 
#else void * xmalloc (size_t sz) { void * out; out = malloc (sz); if (!out) { fprintf(stderr, "Out of memory trying to allocate %zu byte(s)\n", sz); abort (); } return out; } void * xcalloc (size_t n, size_t sz) { void * out; out = calloc (n, sz); if (!out) { fprintf(stderr, "Out of memory trying to allocate/clear %zu items of %zu byte(s)\n", n, sz); abort (); } return out; } void xfree (void * p, size_t sz) { free (p); } #endif // void* xrealloc(void* p, size_t nbytes); /* In utils.c */ // uint_fast64_t random_up_to(mrg_state* st, uint_fast64_t n); void make_mrg_seed(uint64_t userseed1, uint64_t userseed2, uint_fast32_t* seed) { seed[0] = (uint32_t)(userseed1 & UINT32_C(0x3FFFFFFF)) + 1; seed[1] = (uint32_t)((userseed1 >> 30) & UINT32_C(0x3FFFFFFF)) + 1; seed[2] = (uint32_t)(userseed2 & UINT32_C(0x3FFFFFFF)) + 1; seed[3] = (uint32_t)((userseed2 >> 30) & UINT32_C(0x3FFFFFFF)) + 1; seed[4] = (uint32_t)((userseed2 >> 60) << 4) + (uint32_t)(userseed1 >> 60) + 1; } #ifdef __cplusplus } #endif #endif /* UTILS_H */
update_ops_named_Y.c
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

//void Y_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Y_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Y_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Y_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);

/* Apply the Pauli-Y gate to qubit target_qubit_index of an amplitude
 * vector of length dim, in place.  Dispatches to a SIMD and/or OpenMP
 * variant: small states (dim < 2^13) use the single-threaded kernel to
 * avoid parallel overhead. */
void Y_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	//Y_gate_old_single(target_qubit_index, state, dim);
	//Y_gate_old_parallel(target_qubit_index, state, dim);
	//Y_gate_single(target_qubit_index, state, dim);
	//Y_gate_single_simd(target_qubit_index, state, dim);
	//Y_gate_single_unroll(target_qubit_index, state, dim);
	//Y_gate_parallel(target_qubit_index, state, dim);
	//return;

#ifdef _USE_SIMD
#ifdef _OPENMP
	UINT threshold = 13;  /* log2 of the parallelization cutoff */
	if (dim < (((ITYPE)1) << threshold)) {
		Y_gate_single_simd(target_qubit_index, state, dim);
	}
	else {
		Y_gate_parallel_simd(target_qubit_index, state, dim);
	}
#else
	Y_gate_single_simd(target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
	UINT threshold = 13;  /* log2 of the parallelization cutoff */
	if (dim < (((ITYPE)1) << threshold)) {
		Y_gate_single_unroll(target_qubit_index, state, dim);
	}
	else {
		Y_gate_parallel_unroll(target_qubit_index, state, dim);
	}
#else
	Y_gate_single_unroll(target_qubit_index, state, dim);
#endif
#endif
}

/* Scalar Y-gate kernel, manually unrolled by two basis pairs per
 * iteration.  Y swaps the amplitude pair (|..0..>, |..1..>) and
 * multiplies by (-i, +i) respectively. */
void Y_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);      /* bit of the target qubit */
	const ITYPE mask_low = mask - 1;                      /* bits below the target   */
	const ITYPE mask_high = ~mask_low;                    /* bits at/above the target */
	ITYPE state_index = 0;
	const CTYPE imag = 1.i;
	if (target_qubit_index == 0) {
		/* Target is the lowest qubit: partner amplitudes are adjacent. */
		ITYPE basis_index;
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			CTYPE temp0 = state[basis_index];
			state[basis_index] = -imag * state[basis_index + 1];
			state[basis_index + 1] = imag * temp0;
		}
	}
	else {
		/* Insert a zero bit at the target position to enumerate the
		 * |target=0> basis states; two consecutive pairs per iteration. */
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			CTYPE temp0 = state[basis_index_0];
			CTYPE temp1 = state[basis_index_0+1];
			state[basis_index_0] = -imag * state[basis_index_1];
			state[basis_index_0+1] = -imag * state[basis_index_1+1];
			state[basis_index_1] = imag * temp0;
			state[basis_index_1+1] = imag * temp1;
		}
	}
}

#ifdef _OPENMP
/* OpenMP-parallel version of Y_gate_single_unroll; identical arithmetic,
 * iterations are independent so the loops parallelize directly. */
void Y_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	const CTYPE imag = 1.i;
	if (target_qubit_index == 0) {
		ITYPE basis_index;
#pragma omp parallel for
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			CTYPE temp0 = state[basis_index];
			state[basis_index] = -imag * state[basis_index + 1];
			state[basis_index + 1] = imag * temp0;
		}
	}
	else {
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			CTYPE temp0 = state[basis_index_0];
			CTYPE temp1 = state[basis_index_0 + 1];
			state[basis_index_0] = -imag * state[basis_index_1];
			state[basis_index_0 + 1] = -imag * state[basis_index_1 + 1];
			state[basis_index_1] = imag * temp0;
			state[basis_index_1 + 1] = imag * temp1;
		}
	}
}
#endif

#ifdef _USE_SIMD
/* AVX Y-gate kernel: each __m256d holds two complex amplitudes as
 * (re0,im0,re1,im1); the permute swaps re/im and the sign masks realize
 * the multiplications by +/- i. */
void Y_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	//const CTYPE imag = 1.i;
	/* Sign patterns (note _mm256_set_pd lists lanes high-to-low). */
	__m256d minus_even = _mm256_set_pd(1, -1, 1, -1);
	__m256d minus_odd = _mm256_set_pd(-1, 1, -1, 1);
	__m256d minus_half = _mm256_set_pd(1, -1, -1, 1);
	if (target_qubit_index == 0) {
		/* Adjacent-pair case: both amplitudes of a pair sit in one vector. */
		ITYPE basis_index = 0;
		for (basis_index = 0; basis_index <
dim; basis_index += 2) {
			double* ptr0 = (double*)(state + basis_index);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			data0 = _mm256_permute4x64_pd(data0, 27); // (3210) -> (0123) : 16+4*2+3=27
			data0 = _mm256_mul_pd(data0, minus_half);
			_mm256_storeu_pd(ptr0, data0);
		}
	}
	else {
		/* Partner amplitudes are mask elements apart: load both pairs,
		 * swap re/im within each complex, apply +/-i signs, store crossed. */
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			double* ptr0 = (double*)(state + basis_index_0);
			double* ptr1 = (double*)(state + basis_index_1);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			__m256d data1 = _mm256_loadu_pd(ptr1);
			data0 = _mm256_permute_pd(data0, 5); // (3210) -> (2301) : 4+1
			data1 = _mm256_permute_pd(data1, 5);
			data0 = _mm256_mul_pd(data0, minus_even);
			data1 = _mm256_mul_pd(data1, minus_odd);
			_mm256_storeu_pd(ptr1, data0);
			_mm256_storeu_pd(ptr0, data1);
		}
	}
}

#ifdef _OPENMP
/* OpenMP-parallel version of Y_gate_single_simd; same vector arithmetic,
 * loop iterations touch disjoint amplitudes so they parallelize directly. */
void Y_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
	const ITYPE loop_dim = dim / 2;
	const ITYPE mask = (1ULL << target_qubit_index);
	const ITYPE mask_low = mask - 1;
	const ITYPE mask_high = ~mask_low;
	ITYPE state_index = 0;
	//const CTYPE imag = 1.i;
	__m256d minus_even = _mm256_set_pd(1, -1, 1, -1);
	__m256d minus_odd = _mm256_set_pd(-1, 1, -1, 1);
	__m256d minus_half = _mm256_set_pd(1, -1, -1, 1);
	if (target_qubit_index == 0) {
		ITYPE basis_index = 0;
#pragma omp parallel for
		for (basis_index = 0; basis_index < dim; basis_index += 2) {
			double* ptr0 = (double*)(state + basis_index);
			__m256d data0 = _mm256_loadu_pd(ptr0);
			data0 = _mm256_permute4x64_pd(data0, 27); // (3210) -> (0123) : 16+4*2+3=27
			data0 = _mm256_mul_pd(data0, minus_half);
			_mm256_storeu_pd(ptr0, data0);
		}
	}
	else {
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
			ITYPE basis_index_1 = basis_index_0 + mask;
			double* ptr0 = (double*)(state + basis_index_0);
			double* ptr1 =
(double*)(state + basis_index_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); data0 = _mm256_permute_pd(data0, 5); // (3210) -> (2301) : 4+1 data1 = _mm256_permute_pd(data1, 5); data0 = _mm256_mul_pd(data0, minus_even); data1 = _mm256_mul_pd(data1, minus_odd); _mm256_storeu_pd(ptr1, data0); _mm256_storeu_pd(ptr0, data1); } } } #endif #endif /* #ifdef _OPENMP void Y_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; const CTYPE imag = 1.i; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = -imag * state[basis_index_1]; state[basis_index_0] = imag * temp; } } #endif void Y_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = insert_zero_to_basis_index(state_index, mask, target_qubit_index); ITYPE basis_index_1 = basis_index_0 ^ mask; CTYPE cval_0 = state[basis_index_0]; CTYPE cval_1 = state[basis_index_1]; state[basis_index_0] = -cval_1 * 1.i; state[basis_index_1] = cval_0 * 1.i; } } void Y_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); ITYPE state_index; #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = insert_zero_to_basis_index(state_index, mask, target_qubit_index); ITYPE basis_index_1 = basis_index_0 ^ mask; CTYPE cval_0 = state[basis_index_0]; CTYPE 
cval_1 = state[basis_index_1]; state[basis_index_0] = -cval_1 * 1.i; state[basis_index_1] = cval_0 * 1.i; } } void Y_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; const CTYPE imag = 1.i; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = - imag * state[basis_index_1]; state[basis_index_0] = imag * temp; } } */
csr.c
/*!
 * \file
 *
 * \brief Various routines with dealing with CSR matrices
 *
 * \author George Karypis
 * \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
 */

#include <GKlib.h>

#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
    \returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat = gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}


/*************************************************************************/
/*! Initializes the matrix
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  /* Zero every field (all pointers become NULL), then mark the
     dimensions as unset. */
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = mat->ncols = -1;
}


/*************************************************************************/
/*! Frees all the memory allocated for matrix.
    \param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat != NULL) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}


/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
  /* Releases every owned array (row view, column view, and all per-row/
     per-column auxiliary vectors); gk_free NULLs each pointer it frees.
     NOTE(review): the first cast is (void *) rather than (void **); it
     converts implicitly, but (void **) would match gk_free's signature. */
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}

/*************************************************************************/
/*! Returns a copy of a matrix.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure; each field is duplicated only if present.
     rowind/rowval are sized by rowptr[nrows] (the total nonzero count). */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure (mirror of the row structure above) */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}

/*************************************************************************/
/*! Returns a submatrix containint a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure.
     NOTE(review): the rebase loop below dereferences nmat->rowptr even when
     mat->rowptr was NULL and no copy was made — assumes callers always pass
     a matrix with a row structure; TODO confirm. */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  /* rebase the copied rowptr so the submatrix's offsets start at 0 */
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}

/*************************************************************************/
/*!
Returns a submatrix containing a certain set of rows. \param mat is the original matrix. \param nrows is the number of rows to extract. \param rind is the set of row numbers to extract. \returns the row structure of the newly created submatrix. */ /**************************************************************************/ gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind) { ssize_t i, ii, j, nnz; gk_csr_t *nmat; nmat = gk_csr_Create(); nmat->nrows = nrows; nmat->ncols = mat->ncols; for (nnz=0, i=0; i<nrows; i++) nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]]; nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr"); nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind"); nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval"); nmat->rowptr[0] = 0; for (nnz=0, j=0, ii=0; ii<nrows; ii++) { i = rind[ii]; gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz); gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz); nnz += mat->rowptr[i+1]-mat->rowptr[i]; nmat->rowptr[++j] = nnz; } ASSERT(j == nmat->nrows); return nmat; } /*************************************************************************/ /*! Returns a submatrix corresponding to a specified partitioning of rows. \param mat is the original matrix. \param part is the partitioning vector of the rows. \param pid is the partition ID that will be extracted. \returns the row structure of the newly created submatrix. 
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t row, outpos, rowlen, outrow;
  gk_csr_t *sub;

  sub = gk_csr_Create();
  sub->nrows = 0;
  sub->ncols = mat->ncols;

  /* pass 1: count the selected rows and their total nonzeros */
  outpos = 0;
  for (row=0; row<mat->nrows; row++) {
    if (part[row] == pid) {
      sub->nrows++;
      outpos += mat->rowptr[row+1]-mat->rowptr[row];
    }
  }

  sub->rowptr = gk_zmalloc(sub->nrows+1, "gk_csr_ExtractPartition: rowptr");
  sub->rowind = gk_imalloc(outpos, "gk_csr_ExtractPartition: rowind");
  sub->rowval = gk_fmalloc(outpos, "gk_csr_ExtractPartition: rowval");

  /* pass 2: copy every row whose partition matches pid, packing them
     contiguously into the new matrix */
  sub->rowptr[0] = 0;
  outpos = 0;
  outrow = 0;
  for (row=0; row<mat->nrows; row++) {
    if (part[row] != pid)
      continue;
    rowlen = mat->rowptr[row+1]-mat->rowptr[row];
    gk_icopy(rowlen, mat->rowind+mat->rowptr[row], sub->rowind+outpos);
    gk_fcopy(rowlen, mat->rowval+mat->rowptr[row], sub->rowval+outpos);
    outpos += rowlen;
    outrow++;
    sub->rowptr[outrow] = outpos;
  }
  ASSERT(outrow == sub->nrows);

  return sub;
}

/*************************************************************************/
/*!
  Splits the matrix into multiple sub-matrices based on the provided
  color array.
  \param mat is the original matrix.
  \param color is an array of size equal to the number of non-zeros
         in the matrix (row-wise structure). The matrix is split into
         as many parts as the number of colors. For meaningfull results,
         the colors should be numbered consecutively starting from 0.
  \returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* colors are assumed consecutive from 0, so max color + 1 = #parts */
  ncolors = gk_imax(rowptr[nrows], color)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    /* zero-initialized: used first as per-row counters, then as offsets */
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  /* pass 1: count, per color, the nonzeros contributed by each row */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  /* turn each count array into a CSR offset array (prefix sums) */
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  /* pass 2: scatter each nonzero into its color's matrix; rowptr[i] is
     advanced as the insertion cursor for row i */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  /* undo the cursor advancement, restoring proper CSR offsets */
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}

/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
    forward structure.
    \param filename is the file that stores the data.
    \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
           GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
           specifying the type of the input format.
           The GK_CSR_FMT_CSR does not contain a header
           line, whereas the GK_CSR_FMT_BINROW is a binary format written
           by gk_csr_Write() using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or it does not. It only applies when GK_CSR_FMT_CSR is
           used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices start from 1 or 0, respectively. If they start from 1,
           they are automatically decreamented during input so that they
           will start from 0. It only applies when GK_CSR_FMT_CSR is
           used.
    \returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon = 0;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, ival;
  float *rowval=NULL, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  if (!gk_fexists(filename))
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  /* Binary row-major format: raw dump of nrows/ncols/rowptr/rowind[/rowval] */
  if (format == GK_CSR_FMT_BINROW) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
    if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
      gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
    mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
    if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin)
            != mat->rowptr[mat->nrows])
      gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
    if (readvals == 1) {
      mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
      if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin)
              != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
    }
    gk_fclose(fpin);
    return mat;
  }

  /* Binary column-major format: mirror of BINROW using the col structure */
  if (format == GK_CSR_FMT_BINCOL) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
    if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
      gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
    mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
    if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin)
            != mat->colptr[mat->ncols])
      gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
    if (readvals) {
      mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
      if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin)
              != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
    }
    gk_fclose(fpin);
    return mat;
  }

  /* Text formats: determine nrows/nnz and the per-row field layout, then
     fall through to the common line-by-line parser below. */
  if (format == GK_CSR_FMT_CLUTO) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    /* skip '%' comment lines before the header */
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
      gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

    readsizes = 0;
    readwgts  = 0;
    readvals  = 1;  /* CLUTO files always carry values and are 1-based */
    numbering = 1;
  }
  else if (format == GK_CSR_FMT_METIS) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    fmt = ncon = 0;
    nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
    if (nfields < 2)
      gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

    ncols = nrows;  /* METIS graphs are square */
    nnz *= 2;       /* each undirected edge appears in both endpoints' rows */

    if (fmt > 111)
      gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

    /* fmt is a 3-digit flag string: <sizes><wgts><vals> */
    sprintf(fmtstr, "%03zu", fmt%1000);
    readsizes = (fmtstr[0] == '1');
    readwgts  = (fmtstr[1] == '1');
    readvals  = (fmtstr[2] == '1');
    numbering = 1;
    ncon      = (ncon == 0 ? 1 : ncon);
  }
  else { /* GK_CSR_FMT_CSR: headerless; size it by scanning the file first */
    readsizes = 0;
    readwgts  = 0;

    gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

    if (readvals == 1 && nnz%2 == 1)
      gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
    if (readvals == 1)
      nnz = nnz/2;  /* tokens come in (index, value) pairs */
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
  }

  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  /* numbering becomes the additive index correction (-1 for 1-based input) */
  numbering = (numbering ? -1 : 0);
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row: (column [value])* until the line is exhausted */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      ncols = gk_max(rowind[k], ncols);  /* track the largest column seen */

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;
  }
  else {
    mat->ncols = ncols+1;  /* ncols held the max 0-based column index */
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return mat;
}

/**************************************************************************/
/*!
  Writes the row-based structure of a matrix into a file.
  \param mat is the matrix to be written,
  \param filename is the name of the output file.
  \param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
         GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
  \param writevals is either 1 or 0 indicating if the values will be
         written or not. This is only applicable when GK_CSR_FMT_CSR
         is used.
  \param numbering is either 1 or 0 indicating if the internal 0-based
         numbering will be shifted by one or not during output. This
         is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  FILE *fpout;

  /* Binary row-major dump; layout mirrors what gk_csr_Read(BINROW) expects.
     NOTE(review): fwrite return values are not checked — write errors
     (e.g. full disk) pass silently; confirm this best-effort policy. */
  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

    gk_fclose(fpout);
    return;
  }

  /* Binary column-major dump; requires the column structure to exist */
  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

    gk_fclose(fpout);
    return;
  }

  /* Text formats; NULL filename writes to stdout */
  if (filename)
    fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
  else
    fpout = stdout;

  if (format == GK_CSR_FMT_CLUTO) {
    /* CLUTO header: nrows ncols nnz; body is always 1-based with values */
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  for (i=0; i<mat->nrows; i++) {
    for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
      fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
      if (writevals)
        fprintf(fpout, " %f", mat->rowval[j]);
    }
    fprintf(fpout, "\n");
  }
  if (filename)
    gk_fclose(fpout);
}

/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The prunning takes place
    by analyzing the row structure of the matrix. The prunning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.

    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param minf is the minimum number of rows (columns) that a column
           (row) must be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column
           (row) must be present at in order to be kept.
    \returns the prunned matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* output arrays are sized for the worst case (nothing pruned) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      /* count each column's frequency across rows */
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      /* turn counts into a keep/drop flag per column */
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      /* copy only nonzeros whose column survived */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      /* keep a row's nonzeros only if its length is within [minf, maxf];
         dropped rows become empty (their numbering is preserved) */
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}

/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight entries whose
    sum accounts for a certain fraction of the overall weight of the
    row/column.
    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param norm indicates the norm that will be used to aggregate the weights
           and possible values are 1 or 2,
    \param fraction is the fraction of the overall norm that will be retained
           by the kept entries.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* worst-case sized output; compacted after filtering */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      /* nrowptr doubles as per-row insertion cursors while scattering */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* gather column i as (value, row) pairs and its total norm */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep heaviest entries until `fraction` of the norm is covered;
             scatter each kept entry back into its row's cursor slot */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          /* gather row i as (value, column) pairs and its total norm */
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* kept entries are written back at the start of row i's old span */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}

/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight top-K entries
    along each row/column and those entries whose weight is greater than
    a specified value.
    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param topk is the number of the highest weight entries to keep.
    \param keepval is the weight of a term above which will be kept. This
           is used to select additional terms past the first topk.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* NOTE(review): the message tags below say "gk_csr_LowFilter" — copied
     from that routine; left unchanged here to keep this edit doc-only. */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");

      /* nrowptr doubles as per-row insertion cursors while scattering */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++) {
        /* gather column i as (value, row) pairs, sorted by weight */
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* keep the top-k entries unconditionally... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...plus any further entries still at/above keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        /* gather row i as (value, column) pairs, sorted by weight */
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* top-k entries, then any further entries at/above keepval */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);

      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}

/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the terms whose contribution to
    the total length of the document is greater than a user-splied multiple
    over the average.

    This routine assumes that the vectors are normalized to be unit length.

    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param zscore is the multiplicative factor over the average contribution
           to the length of the document.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t row, p, kept;
  int nsrcrows;
  ssize_t *srcptr, *dstptr;
  int *srcind, *dstind;
  float *srcval, *dstval, cutoff;
  gk_csr_t *fmat;

  fmat = gk_csr_Create();

  fmat->nrows = mat->nrows;
  fmat->ncols = mat->ncols;

  nsrcrows = mat->nrows;
  srcptr   = mat->rowptr;
  srcind   = mat->rowind;
  srcval   = mat->rowval;

  /* output sized for the worst case (nothing filtered out) */
  dstptr = fmat->rowptr = gk_zmalloc(nsrcrows+1, "gk_csr_ZScoreFilter: nrowptr");
  dstind = fmat->rowind = gk_imalloc(srcptr[nsrcrows], "gk_csr_ZScoreFilter: nrowind");
  dstval = fmat->rowval = gk_fmalloc(srcptr[nsrcrows], "gk_csr_ZScoreFilter: nrowval");

  if (what == GK_CSR_COL) {
    gk_errexit(SIGERR, "This has not been implemented yet.\n");
  }
  else if (what == GK_CSR_ROW) {
    if (mat->rowptr == NULL)
      gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

    /* per row: keep only entries above zscore times the average
       contribution (1/rowlen, since vectors are assumed unit length) */
    dstptr[0] = 0;
    kept = 0;
    for (row=0; row<nsrcrows; row++) {
      cutoff = zscore/(srcptr[row+1]-srcptr[row]);
      for (p=srcptr[row]; p<srcptr[row+1]; p++) {
        if (srcval[p] > cutoff) {
          dstind[kept] = srcind[p];
          dstval[kept] = srcval[p];
          kept++;
        }
      }
      dstptr[row+1] = kept;
    }
  }
  else {
    gk_csr_Free(&fmat);
    gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
    return NULL;
  }

  return fmat;
}

/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
    As a result of the compaction, the column numbers are renumbered.
    The compaction operation is done in place and only affects the row-based
    representation of the matrix.
    The new columns are ordered in decreasing frequency.

    \param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t p;
  int nrows, ncols, nused;
  ssize_t *rowptr;
  int *rowind, *remap;
  gk_ikv_t *freqs;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  remap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");

  /* tally how often each column appears; val remembers the original id */
  freqs = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");
  for (p=0; p<ncols; p++) {
    freqs[p].key = 0;
    freqs[p].val = p;
  }
  for (p=0; p<rowptr[nrows]; p++)
    freqs[rowind[p]].key++;

  /* sort by decreasing frequency; empty columns sink to the tail */
  gk_ikvsortd(ncols, freqs);

  /* assign new ids in frequency order, stopping at the first empty column */
  nused = 0;
  for (p=0; p<ncols; p++) {
    if (freqs[p].key <= 0)
      break;
    remap[freqs[p].val] = nused++;
  }

  /* rewrite every column index in place using the new numbering */
  for (p=0; p<rowptr[nrows]; p++)
    rowind[p] = remap[rowind[p]];

  mat->ncols = nused;

  gk_free((void **)&remap, &freqs, LTERM);
}

/*************************************************************************/
/*! Sorts the indices in increasing order
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
           indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
  int n, nn=0;
  ssize_t *ptr;
  int *ind;
  float *val;

  /* select the view (row or column) whose index lists will be sorted */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");

      n   = mat->nrows;
      ptr = mat->rowptr;
      ind = mat->rowind;
      val = mat->rowval;
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");

      n   = mat->ncols;
      ptr = mat->colptr;
      ind = mat->colind;
      val = mat->colval;
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  #pragma omp parallel if (n > 100)
  {
    ssize_t i, j, k;
    gk_ikv_t *cand;
    float *tval;

    /* one thread computes the longest list length nn; the implicit barrier
       at the end of `single` publishes it to the team before allocation */
    #pragma omp single
    for (i=0; i<n; i++)
      nn = gk_max(nn, ptr[i+1]-ptr[i]);

    /* per-thread scratch buffers sized for the longest list */
    cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
    tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");

    #pragma omp for schedule(static)
    for (i=0; i<n; i++) {
      /* copy list i into scratch, flagging (k=1) if any inversion exists */
      for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
        if (j > ptr[i] && ind[j] < ind[j-1])
          k = 1; /* an inversion */
        cand[j-ptr[i]].val = j-ptr[i];
        cand[j-ptr[i]].key = ind[j];
        tval[j-ptr[i]]     = val[j];
      }
      /* only sort and write back when the list was actually out of order */
      if (k) {
        gk_ikvsorti(ptr[i+1]-ptr[i], cand);
        for (j=ptr[i]; j<ptr[i+1]; j++) {
          ind[j] = cand[j-ptr[i]].key;
          val[j] = tval[cand[j-ptr[i]].val];
        }
      }
    }

    gk_free((void **)&cand, &tval, LTERM);
  }
}

/*************************************************************************/
/*! Creates a row/column index from the column/row data.
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
           will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  switch (what) {
    case GK_CSR_COL:
      /* build the column view from the row view */
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      /* drop any stale column view before rebuilding it */
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    case GK_CSR_ROW:
      /* build the row view from the column view (mirror of the above) */
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* count the entries per reverse list, then prefix-sum into offsets */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);

  /* two fill strategies; the 6*nr threshold picks between them
     (presumably a cache-behavior heuristic — TODO confirm) */
  if (rptr[nr] > 6*nr) {
    /* dense case: two separate scatter passes (indices, then values),
       each followed by SHIFTCSR to restore the clobbered offsets */
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;
    }
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    /* sparse case: scatter indices and values in a single pass */
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]]   = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}

/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
    \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
  ssize_t i, j;
  int n;
  ssize_t *ptr;
  float *val, sum;

  /* Row normalization (only if the row view carries values). */
  if (what&GK_CSR_ROW && mat->rowval) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;

    /* parallelize only when the nonzero count justifies the overhead */
    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        /* accumulate the chosen norm of row i */
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j]; /* assume val[j] > 0 */
        }
        if (sum > 0) {
          /* scale the row by 1/norm; empty or all-zero rows are skipped */
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }

  /* Column normalization (same structure as above, on the column view). */
  if (what&GK_CSR_COL && mat->colval) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;

    #pragma omp parallel if (ptr[n] > OMPMINOPS)
    {
      #pragma omp for private(j,sum) schedule(static)
      for (i=0; i<n; i++) {
        for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++)
          if (norm == 2)
            sum += val[j]*val[j];
          else if (norm == 1)
            sum += val[j];
        if (sum > 0) {
          if (norm == 2)
            sum=1.0/sqrt(sum);
          else if (norm == 1)
            sum=1.0/sum;
          for (j=ptr[i]; j<ptr[i+1]; j++)
            val[j] *= sum;
        }
      }
    }
  }
}

/*************************************************************************/
/*! Applies different row scaling methods.
    \param mat the matrix itself,
    \param type indicates the type of row scaling. Possible values are:
           GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          /* maxtf = largest |value| in row i */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              /* sign() preserves the sign of the original value */
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              /* x^.25 computed as sqrt(sqrt(x)) */
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        /* logscale converts natural log to log base 2; private per thread */
        double logscale = 1.0/log(2.0);
        /* flat iteration over all nonzeros; row structure is irrelevant here */
        #pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ?
                log(rowval[i]) : -log(-rowval[i]))*logscale;
        }
#ifdef XXX
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ?
                  log(rowval[j]) : -log(-rowval[j]))*logscale;
            //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = document frequency of column c */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        /* NOTE(review): bgfreq is shared and written by every thread after
           the implicit barrier of the omp for (all compute the same value,
           but the write is a data race), and the printf is executed once
           per thread; nnzcols == 0 would divide by zero — TODO confirm
           whether a 'single' region was intended here. */
        bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
        printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ?
              log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}

/*************************************************************************/
/*! Computes the sums of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  /* Select the view and (re)allocate the cached sums array on the matrix. */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rsums)
        gk_free((void **)&mat->rsums, LTERM);

      sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->csums)
        gk_free((void **)&mat->csums, LTERM);

      sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    default:
      gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
      return;
  }

  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}

/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  /* Select the view and (re)allocate the cached squared-norms array. */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms)
        gk_free((void **)&mat->rnorms, LTERM);

      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms)
        gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* squared 2-norm of each row/column via a self dot-product */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}

/*************************************************************************/
/*! Computes the similarity between two rows/columns
    \param mat the matrix itself. The routine assumes that the indices
           are sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type
           of objects between the similarity will be computed,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* Set up pointers to the two sparse vectors being compared. */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* Merge-style walk over the two sorted index lists; i1/i2 are reused
     as cursors from here on. */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* NOTE(review): the first two branches can never fire given the
         loop condition, and the loop stops as soon as either list is
         exhausted — the tail of the longer list does not contribute to
         stat1/stat2, so the cosine denominator covers only the merged
         prefix.  TODO confirm this matches the intended semantics. */
      while (i1<nind1 && i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          /* matching index: contributes to the dot product and both norms */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ?
            sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    /* Asymmetric MIN: normalized only by the first vector's mass. */
    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 && i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}

/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
    similarity.
    \param mat the matrix itself
    \param nqterms is the number of columns in the query
    \param qind is the list of query columns
    \param qval is the list of corresponding query weights
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \param nsim is the maximum number of requested most similar rows.
           If -1 is provided, then everything is returned unsorted.
    \param minsim is the minimum similarity of the requested most
           similar rows
    \param hits is the result set. This array should be at least
           of length nsim.
    \param i_marker is an array of size equal to the number of rows
           whose values are initialized to -1. If NULL is provided then
           this array is allocated and freed internally.
    \param i_cand is an array of size equal to the number of rows.
           If NULL is provided then this array is allocated and freed
           internally.
    \returns the number of identified most similar rows, which can be
             smaller than the requested number of nnbrs in those cases
             in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind, float *qval,
        int simtype, int nsim, float minsim, gk_fkv_t *hits, int *i_marker,
        gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* marker maps row-id -> slot in cand (-1 = not yet a candidate) */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* NOTE(review): only the dot products are accumulated; no division
         by row norms follows, so this appears to assume unit-normalized
         rows — TODO confirm caller contract. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* NOTE(review): assumes mat->rnorms has been populated (e.g. via
         gk_csr_ComputeSquaredNorms) — TODO confirm caller contract. */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* NOTE(review): assumes mat->rsums has been populated (e.g. via
         gk_csr_ComputeSums) — TODO confirm caller contract. */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim; also reset marker so
     a caller-supplied i_marker is ready for reuse */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;
  }
  else {
    /* select the top-nsim candidates and sort them in decreasing order */
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
stencil2d_cpu_omp_kernel.c
#include <homp.h>
#include "stencil2d.h"

/* Per-device CPU kernel for the 2D stencil: updates rows [start, start+len)
 * of the interior of u from uold, using a cross-shaped stencil of the given
 * radius weighted by coeff (or, with SQUARE_SETNCIL, the full square
 * neighborhood).  u/uold are (u_dimX x u_dimY) arrays padded by `radius`
 * halo cells on each side; coeff is indexed relative to its center. */
void stencil2d_cpu_omp_wrapper(omp_offloading_t *off, int start, int len,
        long n, long m, int u_dimX, int u_dimY, REAL *u, REAL *uold,
        int radius, int coeff_dimX, REAL *coeff)
{
#if CORRECTNESS_CHECK
    BEGIN_SERIALIZED_PRINTF(off->devseqid);
    printf("udev: dev: %d, %dX%d\n", off->devseqid, n, m);
    print_array_dev("udev", off->devseqid, "u", (REAL *) u, n, m);
    printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
    print_array_dev("uolddev", off->devseqid, "uold", (REAL *) uold, uold_0_length, uold_1_length);
    printf("i_start: %d, j_start: %d, n: %d, m: %d, uold_0_offset: %d, uold_1_offset: %d\n",
           i_start, j_start, n, m, uold_0_offset, uold_1_offset);
    END_SERIALIZED_PRINTF();
#endif

    /* number of points averaged: center + 4 arms of length radius */
    int count = 4 * radius + 1;
#ifdef SQUARE_SETNCIL
    count = coeff_dimX * coeff_dimX; /* full square neighborhood */
#endif

    int ix, iy, ir;
#pragma omp parallel for private(ix, iy, ir)
    for (ix = start; ix < start + len; ix++) {
        /* point at the first interior column of row ix (skip the halo) */
        REAL *temp_u    = &u[(ix + radius) * u_dimY + radius];
        REAL *temp_uold = &uold[(ix + radius) * u_dimY + radius];
#pragma simd
        for (iy = 0; iy < m; iy++) {
            REAL result = temp_uold[0] * coeff[0];
            /* 2/4 way loop unrolling */
            for (ir = 1; ir <= radius; ir++) {
                result += coeff[ir] * temp_uold[ir];                         /* horizontal right */
                result += coeff[-ir] * temp_uold[-ir];                       /* horizontal left */
                result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; /* vertical up */
                result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY];   /* vertical bottom */
#ifdef SQUARE_SETNCIL
                /* FIX: the original corner terms were missing semicolons and
                 * had mismatched brackets (temp_uold[ir * u_dimY]-ir]), so
                 * the SQUARE_SETNCIL build could not compile. */
                result += coeff[-ir * coeff_dimX - ir] * temp_uold[-ir * u_dimY - ir]; /* left upper corner */
                result += coeff[-ir * coeff_dimX + ir] * temp_uold[-ir * u_dimY + ir]; /* right upper corner */
                result += coeff[ir * coeff_dimX - ir] * temp_uold[ir * u_dimY - ir];   /* left bottom corner */
                result += coeff[ir * coeff_dimX + ir] * temp_uold[ir * u_dimY + ir];   /* right bottom corner */
#endif
            }
            *temp_u = result / count;
            temp_u++;
            temp_uold++;
        }
    }
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
matvec.c
#include <stdio.h> #include <math.h> #include <float.h> #include <stdlib.h> #include <omp.h> #include "ctimer.h" main(int argc, char**argv) { ////// PRODUCTO MATRIZ-VECTOR x=A*b ////// // DECLARACION DE VARIABLES // // DECLARACION DE VARIABLES // double t1,t2,tucpu,tscpu; const int TALLA= 4; const long int M= 6400; const long int N= 3200; int i; int j; double *A; double *b; double *x; double *z; A=malloc(M*N*sizeof(double)); b=malloc(M*sizeof(double)); x=malloc(M*sizeof(double)); z=malloc(M*sizeof(double)); double suma; srand(time(0)); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("Programa que calcula el Producto Matriz-Vector. \n"); printf("------- \n"); // GENERACION DE DATOS // // for (i=0;i<M;i++){ for (j=0;j<N;j++) A[i+j*M]=rand() % TALLA; b[i]=rand() % TALLA; } // PRODUCTO MATRIZ-VECTOR SECUENCIAL // // // printf("Voy a empezar el Producto Matriz-Vector secuencial. \n"); printf(" ------- \n"); double alfa; ctimer(&t1,&tucpu,&tscpu); for(i=0;i<M;i++){ alfa=0.0; for (j=0;j<N;j++) alfa=alfa+A[i+j*M]*b[j]; z[i]=alfa; } ctimer(&t2,&tucpu,&tscpu); printf("Producto Producto MatxVec MatxVec secuencial secuencial z(5) %f \n",z[5]); printf(" ------- \n"); printf("Tiempo %f segundos \n",(float) (t2-t1)); printf(" ------- \n"); // PRODUCTO MATRIZ // PRODUCTO MATRIZ-VECTOR PARALELO / VECTOR PARALELO // printf("Empiezo el producto Matriz-Vector paralelo\n"); printf(" ------- \n"); ctimer(&t1,&tucpu,&tscpu); int tb; int tid; tb=M/TALLA; omp_set_num_threads(TALLA); #pragma omp parallel private(tid,i,j,alfa) { tid = omp_get_thread_num(); for (i=0;i<tb;i++){ alfa=0; for (j=0;j<N;j++) alfa=alfa+A[i+tb*tid+j*M]*b[j]; x[i+tb*tid]=alfa; } } ctimer(&t2,&tucpu,&tscpu); // SALIDA DE RESULTADOS // printf("He terminado el Producto MatxVec paralelo \n"); printf(" ------- \n"); printf("Producto MatxVec paralelo x(5)= %f \n",x[5]); printf(" ------- \n"); // Fin del calculo del Producto Matriz-Vector paralelo printf("Tiempo %f segundos 
\n",(float) (t2-t1)); printf(" ------- \n"); printf(" Comprobación \n"); suma=0.0; for (i=1;i<M;i++) suma=suma + (x[i]-z[i])*(x[i]-z[i]); suma=sqrt(suma); printf("norma dif SEQ-PAR=%f\n",suma); printf(" ------- \n"); printf("He acabado. \n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); printf("/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/\n"); }
openmp-test.c
/* Copyright (c) 1997-2019 OpenMP Architecture Review Board. All rights reserved. Permission to redistribute and use without fee all or part of the source codes and the associated document (the Software), with or without modification, is granted, provided that the following conditions are met: * Redistributions of the software must retain the above copyright notice, this list of conditions and the following disclaimer. * Neither the name of the OpenMP Architecture Review Board nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE OPENMP ARCHITECTURE REVIEW BOARD "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OPENMP ARCHITECTURE REVIEW BOARD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * @@name: fpriv_sections.1c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success */ #include <omp.h> #include <stdio.h> #define NT 4 int main( ) { int section_count = 0; omp_set_dynamic(0); omp_set_num_threads(NT); #pragma omp parallel #pragma omp sections firstprivate( section_count ) { #pragma omp section { section_count++; /* may print the number one or two */ printf( "section_count %d\n", section_count ); } #pragma omp section { section_count++; /* may print the number one or two */ printf( "section_count %d\n", section_count ); } } return 0; }
cryptocontext.h
// @file cryptocontext.h -- Control for encryption operations. // @author TPOC: contact@palisade-crypto.org // // @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT)) // All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. THIS SOFTWARE IS // PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR // IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef SRC_PKE_CRYPTOCONTEXT_H_ #define SRC_PKE_CRYPTOCONTEXT_H_ #include <map> #include <memory> #include <string> #include <utility> #include <vector> #include "palisade.h" #include "scheme/allscheme.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" #include "utils/caller_info.h" #include "utils/serial.h" #include "utils/serialize-binary.h" #include "utils/serialize-json.h" namespace lbcrypto { template <typename Element> class CryptoContextFactory; template <typename Element> class CryptoContextImpl; template <typename Element> using CryptoContext = shared_ptr<CryptoContextImpl<Element>>; /** * @brief CryptoContextImpl * * A CryptoContextImpl is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a * CryptoContextImpl; we say that various objects are "created in" a context, * and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContextImpl methods. 
Guards * are implemented to make certain that only valid objects that have been * created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized * and recovered from a serialization */ template <typename Element> class CryptoContextImpl : public Serializable { using IntType = typename Element::Integer; using ParmType = typename Element::Params; friend class CryptoContextFactory<Element>; protected: // crypto parameters used for this context shared_ptr<LPCryptoParameters<Element>> params; // algorithm used; accesses all crypto methods shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; static std::map<string, std::vector<LPEvalKey<Element>>>& evalMultKeyMap() { // cached evalmult keys, by secret key UID static std::map<string, std::vector<LPEvalKey<Element>>> s_evalMultKeyMap; return s_evalMultKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalSumKeyMap() { // cached evalsum keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalSumKeyMap; return s_evalSumKeyMap; } static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& evalAutomorphismKeyMap() { // cached evalautomorphism keys, by secret key UID static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> s_evalAutomorphismKeyMap; return s_evalAutomorphismKeyMap; } bool doTiming; vector<TimingInfo>* timeSamples; string m_schemeId; size_t m_keyGenLevel; /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstCiphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); 
PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * This is intended for mutable methods, hence inputs are Ciphretext instead * of ConstCiphertext. * * @param a * @param b */ /* void TypeCheck(Ciphertext<Element> a, Ciphertext<Element> b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr || b == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext() != b->GetCryptoContext()) { std::string errorMsg( std::string("Ciphertexts were not created in the same CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetKeyTag() != b->GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding types " << a->GetEncodingType(); ss << " and " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } */ /** * TypeCheck makes sure that an operation 
between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(ConstCiphertext<Element> a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (a == nullptr) { std::string errorMsg(std::string("Null Ciphertext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "Ciphertext encoding type " << a->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b, CALLER_INFO_ARGS_HDR) const { if (a.GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext() != b.GetCryptoContext()) { std::string errorMsg( std::string( "Ciphertexts were not created in the same CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetKeyTag() != b.GetKeyTag()) { std::string errorMsg( std::string("Ciphertexts were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding types " << a.GetNumerator()->GetEncodingType(); ss << " and " << b.GetNumerator()->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, 
ss.str()); } } /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext * is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, ConstPlaintext b, CALLER_INFO_ARGS_HDR) const { if (b == nullptr) { std::string errorMsg(std::string("Null Plaintext") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (a.GetNumerator()->GetEncodingType() != b->GetEncodingType()) { std::stringstream ss; ss << "RationalCiphertext encoding type " << a.GetNumerator()->GetEncodingType(); ss << " and Plaintext encoding type " << b->GetEncodingType(); ss << " do not match"; ss << CALLER_INFO; PALISADE_THROW(type_error, ss.str()); } } bool Mismatched(const CryptoContext<Element> a) const { if (a.get() != this) { return true; } return false; } public: LPPrivateKey<Element> privateKey; /** * This stores the private key in the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in th crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anyone in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes. * * @param sk the secret key * */ void SetPrivateKey(const LPPrivateKey<Element> sk) { #ifdef DEBUG_KEY cerr << "Warning - SetPrivateKey is only intended to be used for debugging " "purposes - not for production systems." 
<< endl; this->privateKey = sk; #else PALISADE_THROW( not_available_error, "SetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } /** * This gets the private key from the crypto context. * This is only intended for debugging and should not be * used in production systems. Please define DEBUG_KEY in * palisade.h to enable this. * * If used, one can create a key pair and store the secret * key in th crypto context like this: * * auto keys = cc->KeyGen(); * cc->SetPrivateKey(keys.secretKey); * * After that, anyone in the code, one can access the * secret key by getting the crypto context and doing the * following: * * auto sk = cc->GetPrivateKey(); * * This key can be used for decrypting any intermediate * ciphertexts for debugging purposes. * * @return the secret key * */ const LPPrivateKey<Element> GetPrivateKey() { #ifdef DEBUG_KEY return this->privateKey; #else PALISADE_THROW( not_available_error, "GetPrivateKey is only allowed if DEBUG_KEY is set in palisade.h"); #endif } void setSchemeId(string schemeTag) { this->m_schemeId = schemeTag; } string getSchemeId() const { return this->m_schemeId; } /** * CryptoContextImpl constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContextImpl(LPCryptoParameters<Element>* params = nullptr, LPPublicKeyEncryptionScheme<Element>* scheme = nullptr, const string& schemeId = "Not") { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * CryptoContextImpl constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - sharedpointer to Crypto Scheme */ CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string& schemeId = "Not") { this->params = params; this->scheme = 
scheme; this->doTiming = false; this->timeSamples = 0; this->m_keyGenLevel = 0; this->m_schemeId = schemeId; } /** * Copy constructor * @param c - source */ CryptoContextImpl(const CryptoContextImpl<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; this->m_keyGenLevel = 0; this->m_schemeId = c.m_schemeId; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; m_keyGenLevel = rhs.m_keyGenLevel; m_schemeId = rhs.m_schemeId; return *this; } /** * A CryptoContextImpl is only valid if the shared pointers are both valid */ operator bool() const { return params && scheme; } /** * Private methods to compare two contexts; this is only used internally and * is not generally available * @param a - operand 1 * @param b - operand 2 * @return true if the implementations have identical parms and scheme */ friend bool operator==(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { // Identical if the parameters and the schemes are identical... 
the exact // same object, OR the same type and the same values if (a.params.get() == b.params.get()) { return true; } else { if (typeid(*a.params.get()) != typeid(*b.params.get())) { return false; } if (*a.params.get() != *b.params.get()) return false; } if (a.scheme.get() == b.scheme.get()) { return true; } else { if (typeid(*a.scheme.get()) != typeid(*b.scheme.get())) { return false; } if (*a.scheme.get() != *b.scheme.get()) return false; } return true; } friend bool operator!=(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { return !(a == b); } // TIMING METHODS /** * StartTiming method activates timing of CryptoMethods * * @param timeSamples points to a vector in which timing samples will be * stored */ void StartTiming(vector<TimingInfo>* timeSamples) { this->timeSamples = timeSamples; doTiming = true; } /* * StopTiming - turns off timing */ void StopTiming() { doTiming = false; } /** * ResumeTiming - re-enables timing with existing TimingInfo vector */ void ResumeTiming() { doTiming = true; } /** * ResetTiming - erases measurements */ void ResetTiming() { this->timeSamples->clear(); } static bool SerializeEvalMultKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalMultKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalMultKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalMultKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalMultKey for a single EvalMult key or all EvalMult keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id for key to serialize - if empty string, serialize them all * @return true on success */ template <typename ST> static bool 
SerializeEvalMultKey(std::ostream& ser, const ST& sertype, string id = ""); /** * SerializeEvalMultKey for all EvalMultKeys made in a given context * * @param cc whose keys should be serialized * @param ser - stream to serialize to * @param sertype - type of serialization * @return true on success (false on failure or no keys found) */ template <typename ST> static bool SerializeEvalMultKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, std::vector<LPEvalKey<Element>>> omap; for (const auto& k : GetAllEvalMultKeys()) { if (k.second[0]->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalMultKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param serObj - stream with a serialization * @return true on success */ template <typename ST> static bool DeserializeEvalMultKey(std::istream& ser, const ST& sertype) { std::map<string, std::vector<LPEvalKey<Element>>> evalMultKeys; Serial::Deserialize(GetAllEvalMultKeys(), ser, sertype); // The deserialize call created any contexts that needed to be created.... 
// so all we need to do is put the keys into the maps for their context for (auto k : GetAllEvalMultKeys()) { GetAllEvalMultKeys()[k.first] = k.second; } return true; } /** * ClearEvalMultKeys - flush EvalMultKey cache */ static void ClearEvalMultKeys(); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given id * @param id */ static void ClearEvalMultKeys(const string& id); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given context * @param cc */ static void ClearEvalMultKeys(const CryptoContext<Element> cc); /** * InsertEvalMultKey - add the given vector of keys to the map, replacing the * existing vector if there * @param vectorToInsert */ static void InsertEvalMultKey( const std::vector<LPEvalKey<Element>>& vectorToInsert); static bool SerializeEvalSumKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalSumKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalSumKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalSumKey for a single EvalSum key or all of the EvalSum keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalSumKeys(); } else { auto k = GetAllEvalSumKeys().find(id); if (k == GetAllEvalSumKeys().end()) return false; // no such id 
smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalSumKey for all of the EvalSum keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalSumKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalSumKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalSumKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalSumKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created.... 
// so all we need to do is put the keys into the maps for their context for (auto k : evalSumKeys) { GetAllEvalSumKeys()[k.first] = k.second; } return true; } /** * ClearEvalSumKeys - flush EvalSumKey cache */ static void ClearEvalSumKeys(); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given id * @param id */ static void ClearEvalSumKeys(const string& id); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given context * @param cc */ static void ClearEvalSumKeys(const CryptoContext<Element> cc); /** * InsertEvalSumKey - add the given map of keys to the map, replacing the * existing map if there * @param mapToInsert */ static void InsertEvalSumKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); static bool SerializeEvalAutomorphismKey(Serialized* serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const string& id) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool SerializeEvalAutomorphismKey(Serialized* serObj, const CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); static bool DeserializeEvalAutomorphismKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * SerializeEvalAutomorphismKey for a single EvalAuto key or all of the * EvalAuto keys * * @param ser - stream to serialize to * @param sertype - type of serialization * @param id - key to serialize; empty string means all keys * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, string id = "") { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>* smap; std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; if (id.length() == 0) { smap = &GetAllEvalAutomorphismKeys(); } else { auto k = GetAllEvalAutomorphismKeys().find(id); if (k 
== GetAllEvalAutomorphismKeys().end()) return false; // no such id smap = &omap; omap[k->first] = k->second; } Serial::Serialize(*smap, ser, sertype); return true; } /** * SerializeEvalAutomorphismKey for all of the EvalAuto keys for a context * * @param ser - stream to serialize to * @param sertype - type of serialization * @param cc - context * @return true on success */ template <typename ST> static bool SerializeEvalAutomorphismKey(std::ostream& ser, const ST& sertype, const CryptoContext<Element> cc) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> omap; for (const auto& k : GetAllEvalAutomorphismKeys()) { if (k.second->begin()->second->GetCryptoContext() == cc) { omap[k.first] = k.second; } } if (omap.size() == 0) return false; Serial::Serialize(omap, ser, sertype); return true; } /** * DeserializeEvalAutomorphismKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param ser - stream to serialize from * @param sertype - type of serialization * @return true on success */ template <typename ST> static bool DeserializeEvalAutomorphismKey(std::istream& ser, const ST& sertype) { std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>> evalSumKeys; Serial::Deserialize(evalSumKeys, ser, sertype); // The deserialize call created any contexts that needed to be created.... 
// so all we need to do is put the keys into the maps for their context for (auto k : evalSumKeys) { GetAllEvalAutomorphismKeys()[k.first] = k.second; } return true; } /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache */ static void ClearEvalAutomorphismKeys(); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given id * @param id */ static void ClearEvalAutomorphismKeys(const string& id); /** * ClearEvalAutomorphismKeys - flush EvalAutomorphismKey cache for a given * context * @param cc */ static void ClearEvalAutomorphismKeys(const CryptoContext<Element> cc); /** * InsertEvalAutomorphismKey - add the given map of keys to the map, replacing * the existing map if there * @param mapToInsert */ static void InsertEvalAutomorphismKey( const shared_ptr<std::map<usint, LPEvalKey<Element>>> mapToInsert); // TURN FEATURES ON /** * Enable a particular feature for use with this CryptoContextImpl * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } // GETTERS /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } size_t GetKeyGenLevel() const { return m_keyGenLevel; } void SetKeyGenLevel(size_t level) { m_keyGenLevel = level; } /** * Getter for element params * @return */ const shared_ptr<ParmType> GetElementParams() const { return params->GetElementParams(); } /** * Getter for encoding params * @return */ const EncodingParams GetEncodingParams() const { return params->GetEncodingParams(); } /** * Get the cyclotomic order used for this context * * @return */ usint 
GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const IntType& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the ciphertext modulus used for this context * * @return */ const IntType& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->KeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), false); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generation of a public key derived * from a previous joined public key (for prior secret shares) and the secret * key share of the current party. * * @param pk joined public key from prior parties. * @param makeSparse set to true if ring reduce by a factor of 2 is to be * used. NOT SUPPORTED BY ANY SCHEME ANYMORE. 
* @param fresh set to true if proxy re-encryption is used in the multi-party * protocol or star topology is used * @return key pair including the secret share for the current party and * joined public key */ LPKeyPair<Element> MultipartyKeyGen(const LPPublicKey<Element> pk, bool makeSparse = false, bool fresh = false) { if (!pk) PALISADE_THROW(config_error, "Input public key is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), pk, makeSparse, fresh); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKey, TOC_US(t))); } return r; } /** * Threshold FHE: Generates a public key from a vector of secret shares. * ONLY FOR DEBUGGIN PURPOSES. SHOULD NOT BE USED IN PRODUCTION. * * @param secretkeys secrete key shares. * @return key pair including the private for the current party and joined * public key */ LPKeyPair<Element> MultipartyKeyGen( const vector<LPPrivateKey<Element>>& secretKeys) { if (!secretKeys.size()) PALISADE_THROW(config_error, "Input private key vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen( CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys, false); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyKeyGenKeyvec, TOC_US(t))); } return r; } /** * Threshold FHE: Method for decryption operation run by the lead decryption * client * * @param privateKey secret key share used for decryption. * @param ciphertext ciphertext id decrypted. 
*/ vector<Ciphertext<Element>> MultipartyDecryptLead( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptLead was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptLead was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptLead( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptLead, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: "Partial" decryption computed by all parties except for the * lead one * * @param privateKey secret key share used for decryption. * @param ciphertext ciphertext that is being decrypted. 
*/ vector<Ciphertext<Element>> MultipartyDecryptMain( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to MultipartyDecryptMain was not " "generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; TimeVar t; if (doTiming) TIC(t); for (size_t i = 0; i < ciphertext.size(); i++) { if (ciphertext[i] == nullptr || Mismatched(ciphertext[i]->GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to MultipartyDecryptMain was not " "generated with this crypto context"); newCiphertext.push_back(GetEncryptionAlgorithm()->MultipartyDecryptMain( privateKey, ciphertext[i])); } if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiPartyDecryptMain, TOC_US(t))); } return newCiphertext; } /** * Threshold FHE: Method for combining the partially decrypted ciphertexts * and getting the final decryption in the clear. * * @param &partialCiphertextVec vector of "partial" decryptions. * @param *plaintext the plaintext output. * @return the decoding result. */ DecryptResult MultipartyDecryptFusion( const vector<Ciphertext<Element>>& partialCiphertextVec, Plaintext* plaintext) const; /** * Threshold FHE: Generates a joined evaluation key * from the current secret share and a prior joined * evaluation key * * @param originalPrivateKey secret key transformed from. * @param newPrivateKey secret key transformed to. * @param ek the prior joined evaluation key. * @return the new joined evaluation key. 
*/ LPEvalKey<Element> MultiKeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey, const LPEvalKey<Element> ek) const { if (!originalPrivateKey) PALISADE_THROW(config_error, "Input first private key is nullptr"); if (!newPrivateKey) PALISADE_THROW(config_error, "Input second private key is nullptr"); if (!ek) PALISADE_THROW(config_error, "Input evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiKeySwitchGen(originalPrivateKey, newPrivateKey, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiKeySwitchGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined automorphism keys * from the current secret share and prior joined * automorphism keys * * @param privateKey secret key share. * @param eAuto a dictionary with prior joined automorphism keys. * @param &indexList a vector of automorphism indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined automorphism keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAutomorphismKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<usint>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAutomorphismKeyGen( privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back( TimingInfo(OpMultiEvalAutomorphismKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined rotation keys * from the current secret share and prior joined * rotation keys * * @param privateKey secret key share. 
* @param eAuto a dictionary with prior joined rotation keys. * @param &indexList a vector of rotation indices. * @param keyId - new key identifier used for the resulting evaluation key * @return a dictionary with new joined rotation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalAtIndexKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eAuto, const std::vector<int32_t>& indexList, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eAuto) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalAtIndexKeyGen(privateKey, eAuto, indexList, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalAtIndexKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Generates joined summation evaluation keys * from the current secret share and prior joined * summation keys * * @param privateKey secret key share. * @param eSum a dictionary with prior joined summation keys. * @param keyId - new key identifier used for the resulting evaluation key * @return new joined summation keys. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiEvalSumKeyGen( const LPPrivateKey<Element> privateKey, const shared_ptr<std::map<usint, LPEvalKey<Element>>> eSum, const std::string& keyId = "") { if (!privateKey) PALISADE_THROW(config_error, "Input private key is nullptr"); if (!eSum) PALISADE_THROW(config_error, "Input evaluation key map is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiEvalSumKeyGen(privateKey, eSum, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiEvalSumKeyGen, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation keys * * @param a first evaluation key. 
* @param b second evaluation key. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key. */ LPEvalKey<Element> MultiAddEvalKeys(LPEvalKey<Element> a, LPEvalKey<Element> b, const std::string& keyId = "") { if (!a) PALISADE_THROW(config_error, "Input first evaluation key is nullptr"); if (!b) PALISADE_THROW(config_error, "Input second evaluation key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiAddEvalKeys(a, b, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiAddEvalKeys, TOC_US(t))); } return r; } /** * Threshold FHE: Generates a partial evaluation key for homomorphic * multiplication based on the current secret share and an existing partial * evaluation key * * @param evalKey prior evaluation key. * @param sk current secret share. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key. */ LPEvalKey<Element> MultiMultEvalKey(LPEvalKey<Element> evalKey, LPPrivateKey<Element> sk, const std::string& keyId = "") { if (!evalKey) PALISADE_THROW(config_error, "Input evaluation key is nullptr"); if (!sk) PALISADE_THROW(config_error, "Input private key is nullptr"); TimeVar t; if (doTiming) TIC(t); auto r = GetEncryptionAlgorithm()->MultiMultEvalKey(evalKey, sk, keyId); if (doTiming) { timeSamples->push_back(TimingInfo(OpMultiMultEvalKey, TOC_US(t))); } return r; } /** * Threshold FHE: Adds two prior evaluation key sets for summation * * @param es1 first summation key set. * @param es2 second summation key set. * @param keyId - new key identifier used for the resulting evaluation key * @return the new joined key set for summation. 
*/
shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalSumKeys(
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1,
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2,
    const std::string& keyId = "") {
  if (!es1)
    PALISADE_THROW(config_error,
                   "Input first evaluation key map is nullptr");
  if (!es2)
    PALISADE_THROW(config_error,
                   "Input second evaluation key map is nullptr");

  TimeVar timer;
  if (doTiming) TIC(timer);
  auto result =
      GetEncryptionAlgorithm()->MultiAddEvalSumKeys(es1, es2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddEvalSumKeys, TOC_US(timer)));
  }
  return result;
}

/**
 * Threshold FHE: Adds two prior evaluation key sets for automorphisms
 *
 * @param es1 first automorphism key set.
 * @param es2 second automorphism key set.
 * @param keyId - new key identifier used for the resulting evaluation key.
 * @return the new joined key set for summation.
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> MultiAddEvalAutomorphismKeys(
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es1,
    const shared_ptr<std::map<usint, LPEvalKey<Element>>> es2,
    const std::string& keyId = "") {
  if (!es1)
    PALISADE_THROW(config_error,
                   "Input first evaluation key map is nullptr");
  if (!es2)
    PALISADE_THROW(config_error,
                   "Input second evaluation key map is nullptr");

  TimeVar timer;
  if (doTiming) TIC(timer);
  auto result =
      GetEncryptionAlgorithm()->MultiAddEvalAutomorphismKeys(es1, es2, keyId);
  if (doTiming) {
    timeSamples->push_back(
        TimingInfo(OpMultiAddEvalAutomorphismKeys, TOC_US(timer)));
  }
  return result;
}

/**
 * Threshold FHE: Adds two partial public keys
 *
 * @param pubKey1 first public key.
 * @param pubKey2 second public key.
 * @param keyId - new key identifier used for the resulting evaluation key.
 * @return the new joined key.
*/
LPPublicKey<Element> MultiAddPubKeys(LPPublicKey<Element> pubKey1,
                                     LPPublicKey<Element> pubKey2,
                                     const std::string& keyId = "") {
  if (!pubKey1)
    PALISADE_THROW(config_error, "Input first public key is nullptr");
  if (!pubKey2)
    PALISADE_THROW(config_error, "Input second public key is nullptr");

  TimeVar timer;
  if (doTiming) TIC(timer);
  auto result =
      GetEncryptionAlgorithm()->MultiAddPubKeys(pubKey1, pubKey2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddPubKeys, TOC_US(timer)));
  }
  return result;
}

/**
 * Threshold FHE: Adds two partial evaluation keys for multiplication
 *
 * @param evalKey1 first evaluation key.
 * @param evalKey2 second evaluation key.
 * @param keyId - new key identifier used for the resulting evaluation key.
 * @return the new joined key.
 */
LPEvalKey<Element> MultiAddEvalMultKeys(LPEvalKey<Element> evalKey1,
                                        LPEvalKey<Element> evalKey2,
                                        const std::string& keyId = "") {
  if (!evalKey1)
    PALISADE_THROW(config_error, "Input first evaluation key is nullptr");
  if (!evalKey2)
    PALISADE_THROW(config_error, "Input second evaluation key is nullptr");

  TimeVar timer;
  if (doTiming) TIC(timer);
  auto result = GetEncryptionAlgorithm()->MultiAddEvalMultKeys(
      evalKey1, evalKey2, keyId);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpMultiAddEvalMultKeys, TOC_US(timer)));
  }
  return result;
}

/**
 * SparseKeyGen generates a key pair with special structure, and without full
 * entropy, for use in special cases like Ring Reduction.
 * @return a public/secret key pair
 */
LPKeyPair<Element> SparseKeyGen() {
  TimeVar timer;
  if (doTiming) TIC(timer);
  // The trailing `true` selects the sparse-key variant of KeyGen.
  auto result = GetEncryptionAlgorithm()->KeyGen(
      CryptoContextFactory<Element>::GetContextForPointer(this), true);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpSparseKeyGen, TOC_US(timer)));
  }
  return result;
}

/**
 * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption
 * @param newKey (public)
 * @param oldKey (private)
 * @return new evaluation key
 */
LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey,
                            const LPPrivateKey<Element> oldKey)
const {
  // Both keys must be non-null and generated by this crypto context;
  // otherwise re-encryption keys would be meaningless.
  if (newKey == nullptr || oldKey == nullptr ||
      Mismatched(newKey->GetCryptoContext()) ||
      Mismatched(oldKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Keys passed to ReKeyGen were not generated with this "
                   "crypto context");

  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpReKeyGenPubPri, TOC_US(t)));
  }
  return r;
}

/**
 * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption
 * NOTE this functionality has been completely removed from PALISADE
 * @param newKey (private)
 * @param oldKey (private)
 * @return new evaluation key
 */
LPEvalKey<Element> ReKeyGen(const LPPrivateKey<Element> newKey,
                            const LPPrivateKey<Element> oldKey) const
    __attribute__((deprecated("functionality removed from PALISADE")));

/**
 * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult
 * operator.
 * The new evaluation key is stored in cryptocontext.
 * @param key secret key used to derive the evaluation key
 */
void EvalMultKeyGen(const LPPrivateKey<Element> key);

/**
 * EvalMultsKeyGen creates a vector evalmult keys that can be used with the
 * PALISADE EvalMult operator: 1st key (for s^2) is used for multiplication of
 * ciphertexts of depth 1, 2nd key (for s^3) is used for multiplication of
 * ciphertexts of depth 2, etc.
 * a vector of new evaluation keys is stored in cryptocontext
 *
 * @param key secret key used to derive the evaluation keys
 */
void EvalMultKeysGen(const LPPrivateKey<Element> key);

/**
 * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID
 * @param keyID
 * @return key vector from ID
 */
static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector(
    const string& keyID);

/**
 * GetEvalMultKeys
 * @return map of all the keys, keyed by KeyID
 */
static std::map<string, std::vector<LPEvalKey<Element>>>&
GetAllEvalMultKeys();

/**
 * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch
 * operation
 * @param key1
 * @param key2
 * @return new evaluation key
 */
LPEvalKey<Element> KeySwitchGen(const LPPrivateKey<Element> key1,
                                const LPPrivateKey<Element> key2) const {
  // Both keys must be non-null and belong to this crypto context.
  if (key1 == nullptr || key2 == nullptr ||
      Mismatched(key1->GetCryptoContext()) ||
      Mismatched(key2->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Keys passed to KeySwitchGen were not generated with this "
                   "crypto context");

  TimeVar t;
  if (doTiming) TIC(t);
  auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpKeySwitchGen, TOC_US(t)));
  }
  return r;
}

/**
 * Encrypt a plaintext using a given public key
 * @param publicKey
 * @param plaintext
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey,
                            Plaintext plaintext) {
  if (publicKey == nullptr)
    PALISADE_THROW(type_error, "null key passed to Encrypt");
  if (plaintext == nullptr)
    PALISADE_THROW(type_error, "Input plaintext is nullptr");
  if (Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(
        config_error,
        "key passed to Encrypt was not generated with this crypto context");

  TimeVar t;
  if (doTiming) TIC(t);
  Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
      publicKey, plaintext->GetElement<Element>());
  // Copy encoding metadata from the plaintext onto the new ciphertext.
  if (ciphertext) {
    ciphertext->SetEncodingType(plaintext->GetEncodingType());
    ciphertext->SetScalingFactor(plaintext->GetScalingFactor());
    ciphertext->SetDepth(plaintext->GetDepth());
    ciphertext->SetLevel(plaintext->GetLevel());
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEncryptPub, TOC_US(t)));
  }
  return ciphertext;
}

/**
 * Encrypt a plaintext using a given private key
 * @param privateKey
 * @param plaintext
 * @return ciphertext (or null on failure)
 */
Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey,
                            Plaintext plaintext) const {
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(
        config_error,
        "key passed to Encrypt was not generated with this crypto context");
  if (plaintext == nullptr)
    PALISADE_THROW(type_error, "Input plaintext is nullptr");

  TimeVar t;
  if (doTiming) TIC(t);
  Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(
      privateKey, plaintext->GetElement<Element>());
  // Copy encoding metadata from the plaintext onto the new ciphertext.
  if (ciphertext) {
    ciphertext->SetEncodingType(plaintext->GetEncodingType());
    ciphertext->SetScalingFactor(plaintext->GetScalingFactor());
    ciphertext->SetDepth(plaintext->GetDepth());
    ciphertext->SetLevel(plaintext->GetLevel());
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEncryptPriv, TOC_US(t)));
  }
  return ciphertext;
}

/**
 * Encrypt a matrix of Plaintext
 * @param publicKey - for encryption
 * @param plaintext - to encrypt
 * @return a matrix of Ciphertexts created by encrypting the plaintext
 */
shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix(
    const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) {
  if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "key passed to EncryptMatrix was not generated with this "
                   "crypto context");

  // Allocator used to shape the result matrix with zero-initialized
  // rational ciphertexts bound to this context.
  auto zeroAlloc = [=]() {
    return RationalCiphertext<Element>(publicKey->GetCryptoContext(), true);
  };

  auto cipherResults = std::make_shared<Matrix<RationalCiphertext<Element>>>(
      zeroAlloc, plaintext.GetRows(), plaintext.GetCols());
TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { if (plaintext(row, col)->Encode() == false) return 0; Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt( publicKey, plaintext(row, col)->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType()); } (*cipherResults)(row, col).SetNumerator(ciphertext); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t))); } return cipherResults; } /** * Encrypt a matrix of Plaintext * @param publicKey - for encryption * @param plaintext - to encrypt * @param doEncryption encrypts if true, embeds (encodes) the plaintext into * cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the * plaintext */ Matrix<Ciphertext<Element>> EncryptMatrixCiphertext( const LPPublicKey<Element> publicKey, Matrix<Plaintext>& plaintext) { if (publicKey == nullptr || Mismatched(publicKey->GetCryptoContext())) PALISADE_THROW(config_error, "key passed to EncryptMatrix was not generated with this " "crypto context"); auto zeroAlloc = [=]() { return Ciphertext<Element>(std::make_shared<CiphertextImpl<Element>>( publicKey->GetCryptoContext())); }; Matrix<Ciphertext<Element>> cipherResults(zeroAlloc, plaintext.GetRows(), plaintext.GetCols()); TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { if (plaintext(row, col)->Encode() == false) PALISADE_THROW(math_error, "Plaintext is not encoded"); Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt( publicKey, plaintext(row, col)->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType(plaintext(row, col)->GetEncodingType()); } cipherResults(row, col) = (ciphertext); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpEncryptMatrixPlain, TOC_US(t))); } return 
cipherResults; } /** * Perform an encryption by reading plaintext from a stream, serializing each * piece of ciphertext, and writing the serializations to an output stream * @param publicKey - the encryption key in use * @param instream - where to read the input from * @param ostream - where to write the serialization to * @param doEncryption encrypts if true, embeds (encodes) the plaintext into * cryptocontext if false * @return */ void EncryptStream(const LPPublicKey<Element> publicKey, std::istream& instream, std::ostream& outstream) const __attribute__(( deprecated("serialization changed, see wiki for details"))); // PLAINTEXT FACTORY METHODS // FIXME to be deprecated in 2.0 /** * MakeScalarPlaintext constructs a ScalarEncoding in this context * @param value * @param isSigned * @return plaintext */ Plaintext MakeScalarPlaintext(int64_t value) const { auto p = PlaintextFactory::MakePlaintext(Scalar, this->GetElementParams(), this->GetEncodingParams(), value); return p; } /** * MakeStringPlaintext constructs a StringEncoding in this context * @param str * @return plaintext */ Plaintext MakeStringPlaintext(const string& str) const { auto p = PlaintextFactory::MakePlaintext(String, this->GetElementParams(), this->GetEncodingParams(), str); return p; } /** * MakeIntegerPlaintext constructs an IntegerEncoding in this context * @param value * @return plaintext */ Plaintext MakeIntegerPlaintext(int64_t value) const { auto p = PlaintextFactory::MakePlaintext(Integer, this->GetElementParams(), this->GetEncodingParams(), value); return p; } /** * MakeIntegerPlaintext constructs a FractionalEncoding in this context * @param value * @param truncatedBits limit on fractional * @return plaintext */ Plaintext MakeFractionalPlaintext(int64_t value, size_t truncatedBits = 0) const { auto p = PlaintextFactory::MakePlaintext( Fractional, this->GetElementParams(), this->GetEncodingParams(), value, truncatedBits); return p; } /** * MakeCoefPackedPlaintext constructs a 
CoefPackedEncoding in this context
 * @param value
 * @return plaintext
 */
Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const {
  return PlaintextFactory::MakePlaintext(
      CoefPacked, this->GetElementParams(), this->GetEncodingParams(), value);
}

/**
 * MakePackedPlaintext constructs a PackedEncoding in this context.
 * @param value coefficient vector to pack
 * @return plaintext
 */
Plaintext MakePackedPlaintext(const vector<int64_t>& value) const {
  return PlaintextFactory::MakePlaintext(Packed, this->GetElementParams(),
                                         this->GetEncodingParams(), value);
}

/**
 * MakePlaintext static that takes a cc and calls the Plaintext Factory.
 * @param encoding encoding to construct
 * @param cc context supplying element/encoding parameters
 * @param value payload forwarded to the factory
 * @return plaintext
 */
template <typename Value1>
static Plaintext MakePlaintext(PlaintextEncodings encoding,
                               CryptoContext<Element> cc,
                               const Value1& value) {
  return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(),
                                         cc->GetEncodingParams(), value);
}

/** Overload forwarding a second payload argument to the factory. */
template <typename Value1, typename Value2>
static Plaintext MakePlaintext(PlaintextEncodings encoding,
                               CryptoContext<Element> cc, const Value1& value,
                               const Value2& value2) {
  return PlaintextFactory::MakePlaintext(encoding, cc->GetElementParams(),
                                         cc->GetEncodingParams(), value,
                                         value2);
}

/**
 * COMPLEX ARITHMETIC IS NOT AVAILABLE STARTING WITH 1.10.6,
 * AND THIS METHOD WILL BE DEPRECATED. USE THE REAL-NUMBER METHOD INSTEAD.
 * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
 * from a vector of complex numbers
 * @param value - input vector
 * @param depth - depth used to encode the vector
 * @param level - level at which the vector will get encrypted
 * @param params - parameters to be used for the ciphertext
 * @return plaintext
 */
virtual Plaintext MakeCKKSPackedPlaintext(
    const std::vector<std::complex<double>>& value, size_t depth = 1,
    uint32_t level = 0, const shared_ptr<ParmType> params = nullptr) const {
  Plaintext p;
  // NOTE(review): assumes this context uses CKKS crypto parameters; if the
  // cast fails cryptoParamsCKKS is null and the next call dereferences it —
  // confirm callers guarantee a CKKS context.
  const auto cryptoParamsCKKS =
      std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>(
          this->GetCryptoParameters());

  double scFact = cryptoParamsCKKS->GetScalingFactorOfLevel(level);

  if (params == nullptr) {
    shared_ptr<ILDCRTParams<DCRTPoly::Integer>> elemParamsPtr;
    if (level != 0) {
      // When encoding at a non-zero level, drop one RNS tower per level
      // from a copy of the context's element parameters.
      ILDCRTParams<DCRTPoly::Integer> elemParams =
          *(cryptoParamsCKKS->GetElementParams());
      for (uint32_t i = 0; i < level; i++) {
        elemParams.PopLastParam();
      }
      elemParamsPtr =
          std::make_shared<ILDCRTParams<DCRTPoly::Integer>>(elemParams);
    } else {
      elemParamsPtr = cryptoParamsCKKS->GetElementParams();
    }
    p = Plaintext(std::make_shared<CKKSPackedEncoding>(
        elemParamsPtr, this->GetEncodingParams(), value, depth, level,
        scFact));
  } else {
    p = Plaintext(std::make_shared<CKKSPackedEncoding>(
        params, this->GetEncodingParams(), value, depth, level, scFact));
  }

  p->Encode();
  return p;
}

/**
 * MakeCKKSPackedPlaintext constructs a CKKSPackedEncoding in this context
 * from a vector of real numbers
 * @param value - input vector
 * @param depth - depth used to encode the vector
 * @param level - level at which the vector will get encrypted
 * @param params - parameters to be used for the ciphertext
 * @return plaintext
 */
virtual Plaintext MakeCKKSPackedPlaintext(
    const std::vector<double>& value, size_t depth = 1, uint32_t level = 0,
    const shared_ptr<ParmType> params = nullptr) const {
  // Widen the real inputs to complex numbers and delegate to the
  // complex-vector overload above.
  std::vector<std::complex<double>> complexValue(value.size());
  std::transform(value.begin(), value.end(),
                 complexValue.begin(),
                 [](double da) { return std::complex<double>(da); });
  return MakeCKKSPackedPlaintext(complexValue, depth, level, params);
}

/**
 * GetPlaintextForDecrypt returns a new Plaintext to be used in decryption.
 *
 * @param pte Type of plaintext we want to return
 * @param evp Element parameters
 * @param ep Encoding parameters
 * @return plaintext
 */
static Plaintext GetPlaintextForDecrypt(PlaintextEncodings pte,
                                        shared_ptr<ParmType> evp,
                                        EncodingParams ep);

public:
/**
 * Decrypt a single ciphertext into the appropriate plaintext
 *
 * @param privateKey - decryption key
 * @param ciphertext - ciphertext to decrypt
 * @param plaintext - resulting plaintext object pointer is here
 * @return
 */
DecryptResult Decrypt(const LPPrivateKey<Element> privateKey,
                      ConstCiphertext<Element> ciphertext,
                      Plaintext* plaintext);

/**
 * Decrypt method for a matrix of ciphertexts
 * @param privateKey - for decryption
 * @param ciphertext - matrix of encrypted ciphertexts
 * @param numerator - pointer to the destination matrix of numerator
 * plaintexts
 * @param denominator - pointer to the destination matrix of denominator
 * plaintexts
 * @return size of plaintext
 */
DecryptResult DecryptMatrix(
    const LPPrivateKey<Element> privateKey,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
    shared_ptr<Matrix<Plaintext>>* numerator,
    shared_ptr<Matrix<Plaintext>>* denominator) const {
  if (ciphertext == nullptr)
    PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to DecryptMatrix was not generated "
                   "with this crypto context");

  // edge case: an empty matrix decrypts to an empty result
  if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
    return DecryptResult();

  const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();

  // need to build matrices for the result; the (0,0) numerator's encoding
  // type determines the plaintext type used to prefill them
  Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(),
                                         this->GetElementParams(),
                                         this->GetEncodingParams());
  auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };

  *numerator =
std::make_shared<Matrix<Plaintext>>( zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()); *denominator = std::make_shared<Matrix<Plaintext>>( zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()); TimeVar t; if (doTiming) TIC(t); for (size_t row = 0; row < ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (Mismatched((*ciphertext)(row, col).GetCryptoContext())) PALISADE_THROW(config_error, "A ciphertext passed to DecryptMatrix was not " "generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator(); // determine which type of plaintext that you need to decrypt into Plaintext decryptedNumerator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt( privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (**numerator)(row, col) = decryptedNumerator; (**numerator)(row, col)->Decode(); Plaintext decryptedDenominator = GetPlaintextForDecrypt( ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); if ((*ciphertext)(row, col).GetIntegerFlag() == true) { decryptedDenominator->GetElement<Poly>().SetValuesToZero(); decryptedDenominator->GetElement<Poly>().at(0) = 1; } else { const Ciphertext<Element> ctD = (*ciphertext)(row, col).GetDenominator(); DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt( privateKey, ctD, &decryptedDenominator->GetElement<NativePoly>()); if (resultD.isValid == false) return resultD; (**denominator)(row, col) = decryptedDenominator; } (**denominator)(row, col)->Decode(); } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t))); } return DecryptResult( (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1) ->GetLength()); } /** * Decrypt method for a matrix of ciphertexts * @param privateKey - for 
decryption
 * @param ciphertext - matrix of encrypted ciphertexts
 * @param numerator - pointer to the destination matrix of plaintexts
 * @return size of plaintext
 */
DecryptResult DecryptMatrixCiphertext(
    const LPPrivateKey<Element> privateKey,
    const Matrix<Ciphertext<Element>> ciphertext,
    Matrix<Plaintext>* numerator) const {
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to DecryptMatrix was not generated "
                   "with this crypto context");

  // edge case: an empty matrix decrypts to an empty result
  if ((ciphertext.GetCols() == 0) && (ciphertext.GetRows() == 0))
    return DecryptResult();

  // NOTE(review): this outer ctN is unused (it is shadowed inside the loop
  // below); the caller is expected to supply a pre-shaped *numerator — see
  // the retained commented-out allocation below.
  const Ciphertext<Element> ctN = (ciphertext)(0, 0);

  // need to build matrices for the result
  //        Plaintext ptx =
  //        GetPlaintextForDecrypt(ctN->GetEncodingType(),
  //                                               this->GetElementParams(),
  //                                               this->GetEncodingParams());
  // auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };

  // numerator = std::make_shared<Matrix<Plaintext>>(zeroPackingAlloc,
  // ciphertext.GetRows(),
  //                                                ciphertext.GetCols());

  TimeVar t;
  if (doTiming) TIC(t);
  for (size_t row = 0; row < ciphertext.GetRows(); row++) {
    for (size_t col = 0; col < ciphertext.GetCols(); col++) {
      if (Mismatched((ciphertext(row, col))->GetCryptoContext()))
        PALISADE_THROW(config_error,
                       "A ciphertext passed to DecryptMatrix was not "
                       "generated with this crypto context");

      const Ciphertext<Element> ctN = (ciphertext)(row, col);

      // determine which type of plaintext that you need to decrypt into
      Plaintext decryptedNumerator = GetPlaintextForDecrypt(
          ctN->GetEncodingType(), this->GetElementParams(),
          this->GetEncodingParams());
      DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
          privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

      if (resultN.isValid == false) return resultN;

      (*numerator)(row, col) = decryptedNumerator;
      (*numerator)(row, col)->Decode();
    }
  }
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpDecryptMatrixPlain, TOC_US(t)));
  }
  // Report the length of the last decoded entry.
  return DecryptResult(
      (*numerator)(numerator->GetRows() -
1, numerator->GetCols() - 1)
          ->GetLength());
}

/**
 * Decrypt method for numerators in a matrix of ciphertexts (packed encoding)
 * @param privateKey - for decryption
 * @param ciphertext - matrix of encrypted ciphertexts
 * @param numerator - pointer to the destination matrix of plaintexts
 * @return size of plaintext
 */
DecryptResult DecryptMatrixNumerator(
    const LPPrivateKey<Element> privateKey,
    const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext,
    shared_ptr<Matrix<Plaintext>>* numerator) const {
  if (ciphertext == nullptr)
    PALISADE_THROW(type_error, "Input ciphertext is nullptr");
  if (privateKey == nullptr || Mismatched(privateKey->GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "Information passed to DecryptMatrix was not generated "
                   "with this crypto context");

  // edge case: an empty matrix decrypts to an empty result
  if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0))
    return DecryptResult();

  TimeVar t;
  if (doTiming) TIC(t);

  // force all precomputations to take place in advance
  if (Mismatched((*ciphertext)(0, 0).GetCryptoContext()))
    PALISADE_THROW(config_error,
                   "A ciphertext passed to DecryptMatrix was not generated "
                   "with this crypto context");

  const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator();

  // need to build a numerator matrix for the result; the (0,0) entry's
  // encoding type determines the plaintext type used to prefill it
  Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(),
                                         this->GetElementParams(),
                                         this->GetEncodingParams());
  auto zeroPackingAlloc = [=]() { return Plaintext(ptx); };
  *numerator = std::make_shared<Matrix<Plaintext>>(
      zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols());

  // Decrypt entry (0,0) serially first (the "precomputation" above); the
  // remaining entries are handled by the parallel loop that follows.
  Plaintext decryptedNumerator =
      GetPlaintextForDecrypt(ctN->GetEncodingType(),
                             this->GetElementParams(),
                             this->GetEncodingParams());
  DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(
      privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

  if (resultN.isValid == false) return resultN;

  (**numerator)(0, 0) = decryptedNumerator;
  (**numerator)(0, 0)->Decode();

  for (size_t row = 0; row < ciphertext->GetRows(); row++) {
#pragma omp parallel for
    for (size_t col = 0; col < ciphertext->GetCols(); col++) {
      // (0,0) was already decrypted serially above.
      if (row + col > 0) {
        if (Mismatched((*ciphertext)(row, col).GetCryptoContext()))
          PALISADE_THROW(config_error,
                         "A ciphertext passed to DecryptMatrix was not "
                         "generated with this crypto context");

        const Ciphertext<Element> ctN =
            (*ciphertext)(row, col).GetNumerator();

        Plaintext decryptedNumerator = GetPlaintextForDecrypt(
            ctN->GetEncodingType(), this->GetElementParams(),
            this->GetEncodingParams());
        // NOTE(review): the DecryptResult is discarded here, unlike the
        // serial (0,0) case — a failed per-entry decryption cannot easily
        // abort from inside the OpenMP loop; confirm this is intentional.
        GetEncryptionAlgorithm()->Decrypt(
            privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>());

        (**numerator)(row, col) = decryptedNumerator;
        (**numerator)(row, col)->Decode();
      }
    }
  }

  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, TOC_US(t)));
  }
  return DecryptResult(
      (**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)
          ->GetLength());
}

/**
 * read instream for a sequence of serialized ciphertext; deserialize it,
 * decrypt it, and write it to outstream
 * @param privateKey - reference to the decryption key
 * @param instream - input stream with sequence of serialized ciphertexts
 * @param outstream - output stream for plaintext
 * @return total bytes processed
 */
size_t DecryptStream(const LPPrivateKey<Element> privateKey,
                     std::istream& instream, std::ostream& outstream)
    __attribute__((
        deprecated("serialization changed, see wiki for details")));

/**
 * ReEncrypt - Proxy Re Encryption mechanism for PALISADE
 * @param evalKey - evaluation key from the PRE keygen method
 * @param ciphertext - vector of shared pointers to encrypted Ciphertext
 * @param publicKey the public key of the recipient of the re-encrypted
 * ciphertext.
* @return vector of shared pointers to re-encrypted ciphertexts */ Ciphertext<Element> ReEncrypt( LPEvalKey<Element> evalKey, ConstCiphertext<Element> ciphertext, const LPPublicKey<Element> publicKey = nullptr) const { if (evalKey == nullptr || Mismatched(evalKey->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to ReEncrypt was not generated with " "this crypto context"); if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "The ciphertext passed to ReEncrypt was not generated " "with this crypto context"); TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> newCiphertext = GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext, publicKey); if (doTiming) { timeSamples->push_back(TimingInfo(OpReEncrypt, TOC_US(t))); } return newCiphertext; } /** * read instream for a serialized ciphertext. deserialize, re-encrypt, * serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted * ciphertext */ void ReEncryptStream(const LPEvalKey<Element> evalKey, std::istream& instream, std::ostream& outstream, const LPPublicKey<Element> publicKey = nullptr) __attribute__(( deprecated("serialization changed, see wiki for details"))); /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAddMutable method for a pair of ciphertexts. * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. 
* * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAdd, TOC_US(t))); } return rv; } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ Matrix<Ciphertext<Element>> EvalAddMatrix( const Matrix<Ciphertext<Element>>& ct1, const Matrix<Ciphertext<Element>>& ct2) const { TypeCheck(ct1(0, 0), ct2(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<Ciphertext<Element>> rv = ct1 + ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddMatrix, TOC_US(t))); } // Matrix<Ciphertext<Element>> a(rv); return rv; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = 
GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSubMutable method for a pair of ciphertexts * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSub, TOC_US(t))); } return rv; } /** * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of * ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ Matrix<Ciphertext<Element>> EvalSubMatrix( const Matrix<Ciphertext<Element>>& ct1, const Matrix<Ciphertext<Element>>& ct2) const { TypeCheck(ct1(0, 0), ct2(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<Ciphertext<Element>> rv = ct1 - ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubMatrix, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAdd method for a ciphertext 
and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const { TypeCheck(ciphertext, plaintext); TimeVar t; if (doTiming) TIC(t); plaintext->SetFormat(EVALUATION); auto rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAddMutable method for a ciphertext and plaintext * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ Ciphertext<Element> EvalAddMutable(Ciphertext<Element>& ciphertext, Plaintext plaintext) const { TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext); TimeVar t; if (doTiming) TIC(t); plaintext->SetFormat(EVALUATION); auto rv = GetEncryptionAlgorithm()->EvalAddMutable(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddPlain, TOC_US(t))); } return rv; } /** * EvalAdd - PALISADE EvalAdd method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext + constant */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, double constant) const { TimeVar t; Ciphertext<Element> rv; if (constant >= 0) { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t))); } } else { TimeVar t; if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, -constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAddConst, TOC_US(t))); } } return rv; } /** * EvalLinearWSum - PALISADE EvalLinearWSum method to compute a linear * weighted sum * * @param ciphertexts a list of ciphertexts * @param constants a list of weights * 
@return new ciphertext containing the weighted sum */ Ciphertext<Element> EvalLinearWSum(vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinearWSum(ciphertexts, constants); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t))); } return rv; } /** * EvalLinearWSum - method to compute a linear weighted sum. * This is a mutable version, meaning the level/depth of input * ciphertexts may change in the process. * * @param ciphertexts a list of ciphertexts * @param constants a list of weights * @return new ciphertext containing the weighted sum */ Ciphertext<Element> EvalLinearWSumMutable( vector<Ciphertext<Element>> ciphertexts, vector<double> constants) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinearWSumMutable(ciphertexts, constants); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalLinearWSum, TOC_US(t))); } return rv; } inline Ciphertext<Element> EvalLinearWSum( vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const { return EvalLinearWSum(ciphertexts, constants); } inline Ciphertext<Element> EvalLinearWSumMutable( vector<double> constants, vector<Ciphertext<Element>> ciphertexts) const { return EvalLinearWSumMutable(ciphertexts, constants); } inline Ciphertext<Element> EvalAdd( ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const { return EvalAdd(ciphertext, plaintext); } inline Ciphertext<Element> EvalAddMutable( Plaintext plaintext, Ciphertext<Element>& ciphertext) const { return EvalAddMutable(ciphertext, plaintext); } inline Ciphertext<Element> EvalAdd( double constant, ConstCiphertext<Element> ciphertext) const { return EvalAdd(ciphertext, constant); } /** * EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> 
EvalSub(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const { TypeCheck(ciphertext, plaintext); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t))); } return rv; } /** * EvalSubPlain - PALISADE EvalSubMutable method for a ciphertext and * plaintext This is a mutable version - input ciphertexts may get * automatically rescaled, or level-reduced. * * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> EvalSubMutable(Ciphertext<Element>& ciphertext, Plaintext plaintext) const { TypeCheck((ConstCiphertext<Element>)ciphertext, (ConstPlaintext)plaintext); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalSubMutable(ciphertext, plaintext); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubPlain, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE EvalSub method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext - constant */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, double constant) const { TimeVar t; Ciphertext<Element> rv; if (constant >= 0) { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t))); } } else { if (doTiming) TIC(t); rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, -constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalSubConst, TOC_US(t))); } } return rv; } inline Ciphertext<Element> EvalSub( ConstPlaintext plaintext, ConstCiphertext<Element> ciphertext) const { return EvalAdd(EvalNegate(ciphertext), plaintext); } inline Ciphertext<Element> EvalSubMutable( Plaintext plaintext, Ciphertext<Element>& ciphertext) const { Ciphertext<Element> negated = EvalNegate(ciphertext); Ciphertext<Element> result = EvalAddMutable(negated, 
plaintext); ciphertext = EvalNegate(negated); return result; } inline Ciphertext<Element> EvalSub( double constant, ConstCiphertext<Element> ciphertext) const { return EvalAdd(EvalNegate(ciphertext), constant); } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key * switching * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key * switching This is a mutable version - input ciphertexts may get * automatically rescaled, or level-reduced. 
* * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1, Ciphertext<Element>& ct2) const { TypeCheck(ct1, ct2); auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, ct2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - no key * switching (relinearization) * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMultNoRelin(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const { TypeCheck(ct1, ct2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMultMany - PALISADE function for evaluating multiplication on * ciphertext followed by relinearization operation (at the end). It computes * the multiplication in a binary tree manner. Also, it reduces the number of * elements in the ciphertext to two after each multiplication. * Currently it assumes that the consecutive two input arguments have * total depth smaller than the supported depth. Otherwise, it throws an * error. * * @param cipherTextList is the ciphertext list. * * @return new ciphertext. 
 */
Ciphertext<Element> EvalMultMany(
    const vector<Ciphertext<Element>>& ct) const {
  // input parameter check
  if (!ct.size()) PALISADE_THROW(type_error, "Empty input ciphertext vector");

  // One relinearization key is needed per ciphertext element beyond two;
  // verify that enough keys were generated (controlled by maxDepth at
  // key-generation time).
  const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag());
  if (ek.size() < (ct[0]->GetElements().size() - 2)) {
    PALISADE_THROW(type_error,
                   "Insufficient value was used for maxDepth to generate "
                   "keys for EvalMult");
  }

  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMultMany, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalAddMany - Evaluate addition on a vector of ciphertexts.
 * It computes the addition in a binary tree manner.
 *
 * @param ctList is the list of ciphertexts.
 *
 * @return new ciphertext.
 */
Ciphertext<Element> EvalAddMany(
    const vector<Ciphertext<Element>>& ctList) const {
  // input parameter check
  if (!ctList.size())
    PALISADE_THROW(type_error, "Empty input ciphertext vector");

  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalAddMany(ctList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAddMany, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalAddManyInPlace - Evaluate addition on a vector of ciphertexts.
 * Addition is computed in a binary tree manner. Difference with EvalAddMany
 * is that EvalAddManyInPlace uses the input ciphertext vector to store
 * intermediate results, to avoid the overhead of using extra temporary
 * space.
 *
 * @param ctList is the list of ciphertexts.
 *
 * @return new ciphertext.
 */
Ciphertext<Element> EvalAddManyInPlace(
    vector<Ciphertext<Element>>& ctList) const {
  // input parameter check
  if (!ctList.size())
    PALISADE_THROW(type_error, "Empty input ciphertext vector");

  TimeVar t;
  if (doTiming) TIC(t);
  // NOTE: ctList is taken by non-const reference; the scheme implementation
  // overwrites its entries with intermediate sums.
  auto rv = GetEncryptionAlgorithm()->EvalAddManyInPlace(ctList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAddManyInPlace, TOC_US(t)));
  }
  return rv;
}

/**
 * Function for evaluating multiplication on ciphertext followed by
 * relinearization operation. Currently it assumes that the input arguments
 * have total depth smaller than the supported depth. Otherwise, it throws an
 * error.
 *
 * @param ct1 first input ciphertext.
 * @param ct2 second input ciphertext.
 *
 * @return new ciphertext
 */
Ciphertext<Element> EvalMultAndRelinearize(
    ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const {
  // input parameter check
  if (!ct1 || !ct2) PALISADE_THROW(type_error, "Input ciphertext is nullptr");

  // Relinearizing the product back down to two elements needs one key per
  // element beyond two in the result; the operand sizes determine that count.
  const auto ek = GetEvalMultKeyVector(ct1->GetKeyTag());
  if (ek.size() <
      (ct1->GetElements().size() + ct2->GetElements().size() - 3)) {
    PALISADE_THROW(type_error,
                   "Insufficient value was used for maxDepth to generate "
                   "keys for EvalMult");
  }

  TimeVar t;
  if (doTiming) TIC(t);
  auto rv = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, ek);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t)));
  }
  return rv;
}

/**
 * Function for relinearization of a ciphertext.
 *
 * @param ct input ciphertext.
* * @return relinearized ciphertext */ Ciphertext<Element> Relinearize(ConstCiphertext<Element> ct) const { // input parameter check if (!ct) PALISADE_THROW(type_error, "Input ciphertext is nullptr"); const auto ek = GetEvalMultKeyVector(ct->GetKeyTag()); if (ek.size() < (ct->GetElements().size() - 2)) { PALISADE_THROW(type_error, "Insufficient value was used for maxDepth to generate " "keys for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->Relinearize(ct, ek); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalRelin, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMult method for plaintext * ciphertext * @param pt2 * @param ct1 * @return new ciphertext for ct1 * pt2 */ inline Ciphertext<Element> EvalMult(ConstPlaintext pt2, ConstCiphertext<Element> ct1) const { return EvalMult(ct1, pt2); } /** * EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext * @param pt2 * @param ct1 * @return new ciphertext for ct1 * pt2 */ inline Ciphertext<Element> EvalMultMutable(Plaintext pt2, Ciphertext<Element>& ct1) const { return EvalMultMutable(ct1, pt2); } /** * EvalMult - PALISADE EvalMult method for constant * ciphertext * @param constant * @param ct1 * @return new ciphertext for ct1 * constant */ inline Ciphertext<Element> EvalMult(double constant, ConstCiphertext<Element> ct1) const { return EvalMult(ct1, constant); } inline Ciphertext<Element> EvalMultMutable(double constant, Ciphertext<Element>& ct1) const { return EvalMultMutable(ct1, constant); } /** * EvalShiftRight - works only for Fractional Encoding * @param pt2 * @param ct1 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalRightShift(ConstCiphertext<Element> ct1, size_t divisor) const { if (ct1 && ct1->GetEncodingType() != Fractional) { std::stringstream ss; ss << "A " << Fractional << " encoded ciphertext is required for the EvalRightShift operation"; PALISADE_THROW(type_error, ss.str()); } Plaintext plaintextShift = 
MakeFractionalPlaintext(0, divisor); TypeCheck(ct1, plaintextShift); double start = 0; if (doTiming) start = currentDateTime(); auto rv = EvalMult(ct1, plaintextShift); if (doTiming) { timeSamples->push_back( TimingInfo(OpEvalRightShift, currentDateTime() - start)); } return rv; } /** * EvalMult - PALISADE EvalMult method for plaintext * ciphertext * @param ct1 * @param pt2 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstPlaintext pt2) const { TypeCheck(ct1, pt2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, pt2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalMultMutable method for plaintext * ciphertext * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. * * @param ct1 * @param pt2 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ct1, Plaintext pt2) const { TypeCheck(ct1, pt2); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ct1, pt2); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMult, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalSub method for a ciphertext and constant * @param ciphertext * @param constant * @return new ciphertext for ciphertext - constant */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext, double constant) const { // input parameter check if (!ciphertext) { PALISADE_THROW(type_error, "Input ciphertext is nullptr"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMult(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t))); } return rv; } /** * EvalMult - PALISADE EvalSub method for a ciphertext and constant * This is a mutable version - input ciphertexts may get automatically * rescaled, or level-reduced. 
* * @param ciphertext * @param constant * @return new ciphertext for ciphertext - constant */ Ciphertext<Element> EvalMultMutable(Ciphertext<Element>& ciphertext, double constant) const { // input parameter check if (!ciphertext) { PALISADE_THROW(type_error, "Input ciphertext is nullptr"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalMultMutable(ciphertext, constant); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultConst, TOC_US(t))); } return rv; } /** * EvalMultMatrix - PALISADE EvalMult method for two matrices of ciphertext * @param ct1 * @param ct2 * @return new matrix for ct1 * ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0, 0), (*ct2)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2; if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalMultMatrix, TOC_US(t))); } return std::make_shared<Matrix<RationalCiphertext<Element>>>(rv); } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ct) const { if (ct == nullptr || Mismatched(ct->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to EvalNegate was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalNegate(ct); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalNeg, TOC_US(t))); } return rv; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix( const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const { if (ct == nullptr || Mismatched((*ct)(0, 0).GetCryptoContext())) 
PALISADE_THROW(config_error, "Information passed to EvalNegateMatrix was not generated " "with this crypto context"); TimeVar t; if (doTiming) TIC(t); auto m = std::make_shared<Matrix<RationalCiphertext<Element>>>( ct->GetAllocator(), ct->GetRows(), ct->GetCols()); for (size_t r = 0; r < m->GetRows(); r++) for (size_t c = 0; c < m->GetCols(); c++) (*m)(r, c) = -((*ct)(r, c)); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalNegMatrix, TOC_US(t))); } return m; } /** * Generate automophism keys for a given private key * * @param publicKey original public key. * @param origPrivateKey original private key. * @param indexList list of automorphism indices to be computed * @return returns the evaluation keys; index 0 of the vector corresponds to * plaintext index 2, index 1 to plaintex index 3, etc. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen( const LPPublicKey<Element> publicKey, const LPPrivateKey<Element> origPrivateKey, const std::vector<usint>& indexList) const { if (publicKey == nullptr || origPrivateKey == nullptr) PALISADE_THROW(type_error, "Null Keys"); if (!indexList.size()) PALISADE_THROW(config_error, "Input index vector is empty"); if (publicKey->GetCryptoContext().get() != this) PALISADE_THROW(type_error, "Key was not created in this CryptoContextImpl"); if (publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext()) PALISADE_THROW(type_error, "Keys were not created in the same CryptoContextImpl"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen( publicKey, origPrivateKey, indexList); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAutomorphismKeyGen, TOC_US(t))); } return rv; } /** * Function for evaluating automorphism of ciphertext at index i * * @param ciphertext the input ciphertext. * @param i automorphism index * @param &evalKeys - reference to the vector of evaluation keys generated by * EvalAutomorphismKeyGen. 
* @return resulting ciphertext */ Ciphertext<Element> EvalAutomorphism( ConstCiphertext<Element> ciphertext, usint i, const std::map<usint, LPEvalKey<Element>>& evalKeys, CALLER_INFO_ARGS_HDR) const { if (nullptr == ciphertext) { std::string errorMsg(std::string("Input ciphertext is nullptr") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (evalKeys.empty()) { std::string errorMsg(std::string("Empty input key map") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } auto tk = evalKeys.begin()->second; if (nullptr == tk) { std::string errorMsg(std::string("Invalid evalKey") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (ciphertext->GetCryptoContext().get() != this) { std::string errorMsg( std::string("Ciphertext was not created in this CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (ciphertext->GetCryptoContext() != tk->GetCryptoContext()) { std::string errorMsg( std::string("Items were not created in the same CryptoContextImpl") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } if (ciphertext->GetKeyTag() != tk->GetKeyTag()) { std::string errorMsg( std::string("Items were not encrypted with same keys") + CALLER_INFO); PALISADE_THROW(type_error, errorMsg); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys); if (doTiming) { timeSamples->push_back(TimingInfo(OpEvalAutomorphismI, TOC_US(t))); } return rv; } /** * Generate automophism keys for a given private key; Uses the private key for * encryption * * @param privateKey private key. 
 * @param indexList list of automorphism indices to be computed
 * @return returns the evaluation keys
 */
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(
    const LPPrivateKey<Element> privateKey,
    const std::vector<usint>& indexList) const {
  if (privateKey == nullptr) PALISADE_THROW(type_error, "Null input");

  if (!indexList.size())
    PALISADE_THROW(config_error, "Input index vector is empty");

  // The key must belong to this context instance.
  if (privateKey->GetCryptoContext().get() != this)
    PALISADE_THROW(type_error,
                   "Key was not created in this CryptoContextImpl");

  TimeVar t;
  if (doTiming) TIC(t);
  auto rv =
      GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpEvalAutomorphismK, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalSumKeyGen Generates the key map to be used by evalsum
 *
 * @param privateKey private key.
 * @param publicKey public key (used in NTRU schemes).
 */
void EvalSumKeyGen(const LPPrivateKey<Element> privateKey,
                   const LPPublicKey<Element> publicKey = nullptr);

// Generates the key map used for row-wise EvalSum over packed matrices.
// presumably rowSize == 0 selects a default row size - TODO confirm against
// the implementation.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumRowsKeyGen(
    const LPPrivateKey<Element> privateKey,
    const LPPublicKey<Element> publicKey = nullptr, usint rowSize = 0);

// Generates the key map used for column-wise EvalSum over packed matrices.
shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumColsKeyGen(
    const LPPrivateKey<Element> privateKey,
    const LPPublicKey<Element> publicKey = nullptr);

/**
 * GetEvalSumKey returns the map
 *
 * @return the EvalSum key map
 */
static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap(
    const string& id);

// Returns the full static cache of EvalSum key maps, keyed by key tag.
static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>&
GetAllEvalSumKeys();

/**
 * Function for evaluating a sum of all components
 *
 * @param ciphertext the input ciphertext.
* @param batchSize size of the batch * @return resulting ciphertext */ Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize) const; Ciphertext<Element> EvalSumRows( ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>>& evalKeys) const; Ciphertext<Element> EvalSumCols( ConstCiphertext<Element> ciphertext, usint rowSize, const std::map<usint, LPEvalKey<Element>>& evalKeys) const; /** * EvalSumKeyGen Generates the key map to be used by evalsum * * @param privateKey private key. * @param indexList list of indices. * @param publicKey public key (used in NTRU schemes). */ void EvalAtIndexKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<int32_t>& indexList, const LPPublicKey<Element> publicKey = nullptr); /** * EvalFastRotationPrecompute implements the precomputation step of * hoisted automorphisms. * * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic * linear transformations in HELib." for more details, link: * https://eprint.iacr.org/2018/244. * * Generally, automorphisms are performed with three steps: (1) the * automorphism is applied on the ciphertext, (2) the automorphed values are * decomposed into digits, and (3) key switching is applied to make it * possible to further compute on the ciphertext. * * Hoisted automorphisms is a technique that performs the digit decomposition * for the original ciphertext first, and then performs the automorphism and * the key switching on the decomposed digits. The benefit of this is that the * digit decomposition is independent of the automorphism rotation index, so * it can be reused for multiple different indices. This can greatly improve * performance when we have to compute many automorphisms on the same * ciphertext. This routinely happens when we do permutations (EvalPermute). * * EvalFastRotationPrecompute implements the digit decomposition step of * hoisted automorphisms. 
 *
 * @param ct the input ciphertext on which to do the precomputation (digit
 * decomposition)
 */
shared_ptr<vector<Element>> EvalFastRotationPrecompute(
    ConstCiphertext<Element> ct) const {
  TimeVar t;
  if (doTiming) TIC(t);
  // The returned digit decomposition is independent of the rotation index,
  // so callers can reuse it across multiple EvalFastRotation calls.
  auto rv = GetEncryptionAlgorithm()->EvalFastRotationPrecompute(ct);
  if (doTiming) {
    timeSamples->push_back(TimingInfo(OpFastRotPrecomp, TOC_US(t)));
  }
  return rv;
}

/**
 * EvalFastRotation implements the automorphism and key switching step of
 * hoisted automorphisms.
 *
 * Please refer to Section 5 of Halevi and Shoup, "Faster Homomorphic
 * linear transformations in HELib." for more details, link:
 * https://eprint.iacr.org/2018/244.
 *
 * Generally, automorphisms are performed with three steps: (1) the
 * automorphism is applied on the ciphertext, (2) the automorphed values are
 * decomposed into digits, and (3) key switching is applied to make it
 * possible to further compute on the ciphertext.
 *
 * Hoisted automorphisms is a technique that performs the digit decomposition
 * for the original ciphertext first, and then performs the automorphism and
 * the key switching on the decomposed digits. The benefit of this is that the
 * digit decomposition is independent of the automorphism rotation index, so
 * it can be reused for multiple different indices. This can greatly improve
 * performance when we have to compute many automorphisms on the same
 * ciphertext. This routinely happens when we do permutations (EvalPermute).
 *
 * EvalFastRotation implements the automorphism and key switching step of
 * hoisted automorphisms.
 *
 * This method assumes that all required rotation keys exist. This may not be
 * true if we are using baby-step/giant-step key switching. Please refer to
 * Section 5.1 of the above reference and EvalPermuteBGStepHoisted to see how
 * to deal with this issue.
 *
 * @param ct the input ciphertext to perform the automorphism on
 * @param index the index of the rotation.
Positive indices correspond to left * rotations and negative indices correspond to right rotations. * @param m is the cyclotomic order * @param digits the digit decomposition created by EvalFastRotationPrecompute * at the precomputation step. */ Ciphertext<Element> EvalFastRotation( ConstCiphertext<Element> ct, const usint index, const usint m, const shared_ptr<vector<Element>> digits) const { TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalFastRotation(ct, index, m, digits); if (doTiming) { timeSamples->push_back(TimingInfo(OpFastRot, TOC_US(t))); } return rv; } /** * Merges multiple ciphertexts with encrypted results in slot 0 into a single * ciphertext The slot assignment is done based on the order of ciphertexts in * the vector * * @param ciphertextVector vector of ciphertexts to be merged. * @param &evalKeys - reference to the map of evaluation keys generated by * EvalAutomorphismKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalMerge( const vector<Ciphertext<Element>>& ciphertextVector) const; /** * GetEvalAutomorphismKey returns the map * * @return the EvalAutomorphism key map */ static const std::map<usint, LPEvalKey<Element>>& GetEvalAutomorphismKeyMap( const string& id); static std::map<string, shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalAutomorphismKeys(); /** * Moves i-th slot to slot 0 * * @param ciphertext. * @param i the index. * @return resulting ciphertext */ Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, int32_t index) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector - ciphertext. 
* @param plaintext second vector - plaintext. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext plaintext, usint batchSize) const; /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known as * as cross-correlation, sliding inner product, or sliding dot product in * image processing * @param x - first vector of row vectors * @param y - second vector of row vectors * @param batchSize - batch size for packed encoding * @param indexStart - starting index in the vectors of row vectors * @param length - length of the slice in the vectors of row vectors; default * is 0 meaning to use the full length of the vector * @return sum(x_i*y_i), i.e., a sum of inner products */ Ciphertext<Element> EvalCrossCorrelation( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const; /** * Method for polynomial evaluation for polynomials represented as power * series. * * @param &cipherText input ciphertext * @param &coefficients is the vector of coefficients in the polynomial; the * size of the vector is the degree of the polynomial + 1 * @return the result of polynomial evaluation. 
*/ virtual Ciphertext<Element> EvalPoly( ConstCiphertext<Element> ciphertext, const std::vector<double>& coefficients) const { if (ciphertext == nullptr || this->Mismatched(ciphertext->GetCryptoContext())) throw std::logic_error( "Information passed to EvalPoly was not generated with this crypto " "context"); TimeVar t; if (this->doTiming) TIC(t); auto rv = std::static_pointer_cast<LPPublicKeyEncryptionScheme<Element>>( this->GetEncryptionAlgorithm()) ->EvalPoly(ciphertext, coefficients); if (this->doTiming) { this->timeSamples->push_back(TimingInfo(OpEvalPoly, TOC_US(t))); } return rv; } /** * EvalLinRegressBatched- Computes the parameter vector for linear regression * using the least squares method Supported only in batched mode; currently * works only for two regressors * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares * method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const; /** * EvalLinRegression - Computes the parameter vector for linear regression * using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares * method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression( const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { TypeCheck((*x)(0, 0), (*y)(0, 0)); // TODO only checking one; when Matrix is // refactored, this should be revisited TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y); if (doTiming) { timeSamples->push_back(TimingInfo(OpLinRegression, TOC_US(t))); } return rv; } /** * KeySwitch - PALISADE KeySwitch method * @param keySwitchHint 
- reference to KeySwitchHint * @param ciphertext - vector of ciphertext * @return new CiphertextImpl after applying key switch */ Ciphertext<Element> KeySwitch(const LPEvalKey<Element> keySwitchHint, ConstCiphertext<Element> ciphertext) const { if (keySwitchHint == nullptr || Mismatched(keySwitchHint->GetCryptoContext())) PALISADE_THROW( config_error, "Key passed to KeySwitch was not generated with this crypto context"); if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "Ciphertext passed to KeySwitch was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpKeySwitch, TOC_US(t))); } return rv; } /** * Rescale - An alias for PALISADE ModReduce method. * This is because ModReduce is called Rescale in CKKS. * * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ Ciphertext<Element> Rescale(ConstCiphertext<Element> ciphertext) const { if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW(config_error, "Information passed to Rescale was not generated with " "this crypto context"); TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return rv; } /** * ModReduce - PALISADE ModReduce method used only for BGVrns * @param ciphertext - vector of ciphertext * @param numTowersToDrop - number of towers to drop * @return vector of mod reduced ciphertext */ Ciphertext<Element> ModReduce(ConstCiphertext<Element> ciphertext) const { if (ciphertext == nullptr || Mismatched(ciphertext->GetCryptoContext())) PALISADE_THROW( not_available_error, "Information passed to ModReduce was not generated with this crypto " "context"); TimeVar t; if (doTiming) TIC(t); auto rv = 
GetEncryptionAlgorithm()->ModReduce(ciphertext); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ RationalCiphertext<Element> ModReduceRational( RationalCiphertext<Element> ciphertext) const { TimeVar t; if (doTiming) TIC(t); Ciphertext<Element> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator()); Ciphertext<Element> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator()); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduce, TOC_US(t))); } return RationalCiphertext<Element>(n, d); } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix( shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const { // needs context check TimeVar t; if (doTiming) TIC(t); auto m = std::make_shared<Matrix<RationalCiphertext<Element>>>( ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols()); for (size_t r = 0; r < m->GetRows(); r++) for (size_t c = 0; c < m->GetCols(); c++) (*m)(r, c) = ModReduceRational((*ciphertext)(r, c)); if (doTiming) { timeSamples->push_back(TimingInfo(OpModReduceMatrix, TOC_US(t))); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ Ciphertext<Element> LevelReduce( ConstCiphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint, size_t levels = 1) const { const auto cryptoParams = std::dynamic_pointer_cast<LPCryptoParametersCKKS<DCRTPoly>>( cipherText1->GetCryptoParameters()); if (cipherText1 == nullptr || Mismatched(cipherText1->GetCryptoContext())) { PALISADE_THROW(config_error, "Information passed to LevelReduce was not generated with " "this crypto 
context"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->LevelReduce( cipherText1, linearKeySwitchHint, levels); if (doTiming) { timeSamples->push_back(TimingInfo(OpLevelReduce, TOC_US(t))); } return rv; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original * private key to the quadratic key return vector of resulting ciphertext */ Ciphertext<Element> ComposedEvalMult( ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const { if (ciphertext1 == nullptr || ciphertext2 == nullptr || ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() || Mismatched(ciphertext1->GetCryptoContext())) PALISADE_THROW(config_error, "Ciphertexts passed to ComposedEvalMult were not " "generated with this crypto context"); auto ek = GetEvalMultKeyVector(ciphertext1->GetKeyTag()); if (!ek.size()) { PALISADE_THROW(type_error, "Evaluation key has not been generated for EvalMult"); } TimeVar t; if (doTiming) TIC(t); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, ek[0]); if (doTiming) { timeSamples->push_back(TimingInfo(OpComposedEvalMult, TOC_US(t))); } return rv; } static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); static LPEvalKey<Element> deserializeEvalKeyInContext( const Serialized& serObj, CryptoContext<Element> cc) __attribute__(( deprecated("serialization changed, see wiki for details"))); template <class Archive> void save(Archive& 
ar, std::uint32_t const version) const { ar(cereal::make_nvp("cc", params)); ar(cereal::make_nvp("kt", scheme)); ar(cereal::make_nvp("si", m_schemeId)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(cereal::make_nvp("cc", params)); ar(cereal::make_nvp("kt", scheme)); ar(cereal::make_nvp("si", m_schemeId)); // NOTE: a pointer to this object will be wrapped in a shared_ptr, and is a // "CryptoContext". PALISADE relies on the notion that identical // CryptoContextImpls are not duplicated in memory Once we deserialize this // object, we must check to see if there is a matching object for this // object that's already existing in memory if it DOES exist, use it. If it // does NOT exist, add this to the cache of all contexts } virtual std::string SerializedObjectName() const { return "CryptoContext"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template <typename Element> class CryptoObject { protected: CryptoContext<Element> context; // crypto context this object belongs to // tag used to find the evaluation key needed // for SHE/FHE operations string keyTag; public: explicit CryptoObject(CryptoContext<Element> cc = nullptr, const string& tag = "") : context(cc), keyTag(tag) {} CryptoObject(const CryptoObject& rhs) { context = rhs.context; keyTag = rhs.keyTag; } CryptoObject(const CryptoObject&& rhs) { context = std::move(rhs.context); keyTag = std::move(rhs.keyTag); } virtual ~CryptoObject() {} const CryptoObject& operator=(const CryptoObject& rhs) { this->context = rhs.context; this->keyTag = rhs.keyTag; return *this; } const CryptoObject& operator=(const CryptoObject&& rhs) { this->context = std::move(rhs.context); this->keyTag = 
std::move(rhs.keyTag); return *this; } bool operator==(const CryptoObject& rhs) const { return context.get() == rhs.context.get() && keyTag == rhs.keyTag; } CryptoContext<Element> GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } const EncodingParams GetEncodingParameters() const { return context->GetCryptoParameters()->GetEncodingParams(); } const string GetKeyTag() const { return keyTag; } void SetKeyTag(const string& tag) { keyTag = tag; } template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(::cereal::make_nvp("cc", context)); ar(::cereal::make_nvp("kt", keyTag)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(::cereal::make_nvp("cc", context)); ar(::cereal::make_nvp("kt", keyTag)); context = CryptoContextFactory<Element>::GetContext( context->GetCryptoParameters(), context->GetEncryptionAlgorithm()); } std::string SerializedObjectName() const { return "CryptoObject"; } static uint32_t SerializedVersion() { return 1; } }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from * user parameters * */ template <typename Element> class CryptoContextFactory { using ParmType = typename Element::Params; using IntType = typename Element::Integer; protected: static vector<CryptoContext<Element>> AllContexts; public: static void ReleaseAllContexts(); static int GetContextCount(); static CryptoContext<Element> GetSingleContext(); static CryptoContext<Element> GetContext( shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme, const string& schemeId = "Not"); static CryptoContext<Element> GetContextForPointer( 
CryptoContextImpl<Element>* cc); static const vector<CryptoContext<Element>>& GetAllContexts(); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic * multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @param bigmodulusarb - additional large modulus for bigmoduls for the case * of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * 
switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param delta - the plaintext scaling parameter floor(q/t) in BFV * @param mode - mode for generating secret keys (RLWE vs OPTIMIZED) * @param bigmodulus - large modulus used in tensoring of homomorphic * multiplication * @param bigrootofunity - root of unity for bigmodulus * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @param bigmodulusarb - additional large modulus for bigmoduls for the case * of general (non-power-of-two) cyclotomics * @param bigrootofunityarb - root of unity for bigmodulusarb * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @return new context */ static CryptoContext<Element> genCryptoContextBFV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param relinWindow bits in the base of digits in key * switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for 
homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param relinWindow bits in the base of digits in key * switching/relinearization * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in 
case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param distribution parameter for Gaussian noise generation * 
@param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, SecurityLevel securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger 
than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window (bits in the base for digits) * used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard secuirity level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window (bits in the base for digits) * used for digit decomposition (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> 
genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext 
encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for 
homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param plaintextModulus plaintext modulus * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching 
window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( const PlaintextModulus plaintextModulus, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel root Hermite factor (lattice security parameter) * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int 
numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BFVrnsB Scheme using the * scheme's ParamsGen methods * @param encodingParams plaintext encoding parameters * @param securityLevel standard security level * @param dist distribution parameter for Gaussian noise generation * @param numAdds additive depth for homomorphic computations (assumes * numMults and numKeySwitches are set to zero) * @param numMults multiplicative depth for homomorphic computations (assumes * numAdds and numKeySwitches are set to zero) * @param numKeyswitches key-switching depth for homomorphic computations * (assumes numAdds and numMults are set to zero) * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param maxDepth the maximum power of secret key for which the * relinearization key is generated (by default, it is 2); setting it to a * value larger than 2 adds support for homomorphic multiplication w/o * relinearization * @param relinWindow the key switching window used for digit decomposition * (0 - means to use only CRT decomposition) * @param dcrtBits size of "small" CRT moduli * @param n ring dimension in case the user wants to use a custom ring * dimension * @return new context */ static CryptoContext<Element> genCryptoContextBFVrnsB( EncodingParams encodingParams, SecurityLevel securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2, uint32_t relinWindow = 0, size_t dcrtBits = 60, uint32_t n = 0); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key 
distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param mode secret key distribution mode (RLWE [Gaussian noise] or * OPTIMIZED [ternary uniform distribution]) * @param depth of supported computation circuit (not used; for future use) * @return new context */ static CryptoContext<Element> genCryptoContextBGV( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * construct a PALISADE CryptoContextImpl for the CKKS Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * 
@param stDev * @param mode * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, RescalingTechnique rsTech = APPROXRESCALE); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the CKKS Scheme with it. * * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param scaleExp the plaintext scaling factor, which is equal to dcrtBits in * our implementation of CKKS * @param batchSize the batch size of the ciphertext * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param ksTech key switching technique to use (e.g., GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @return new context */ static CryptoContext<Element> genCryptoContextCKKSWithParamsGen( usint cyclOrder, usint numPrimes, usint scaleExp, usint relinWindow, usint batchSize, MODE mode, int depth = 1, int maxDepth = 2, usint firstModSize = 60, enum KeySwitchTechnique ksTech = BV, enum RescalingTechnique rsTech = APPROXRESCALE, uint32_t numLargeDigits = 4); /** * Construct a PALISADE CryptoContextImpl for the CKKS Scheme. 
* * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param scalingFactorBits the size of the scaling factor in bits * @param batchSize the number of slots being used in the ciphertext * @param stdLevel the standard security level we want the scheme to satisfy * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param rsTech rescaling technique to use (e.g., APPROXRESCALE or * EXACTRESCALE) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param relinWindow the relinearization windows (used in BV key switching, * use 0 for RNS decomposition) * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @return new context */ static CryptoContext<Element> genCryptoContextCKKS( usint multiplicativeDepth, usint scalingFactorBits, usint batchSize, SecurityLevel stdLevel = HEStd_128_classic, usint ringDim = 0, enum RescalingTechnique rsTech = EXACTRESCALE, enum KeySwitchTechnique ksTech = HYBRID, uint32_t numLargeDigits = 0, int maxDepth = 2, usint firstModSize = 60, usint relinWindow = 0, MODE mode = OPTIMIZED); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint 
relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * construct a PALISADE CryptoContextImpl for the BGVrns Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param ksTech key switching technique to use (e.g., GHS or BV) * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, enum ModSwitchMethod msMethod = MANUAL); /** * Automatically generate the moduli chain and construct a PALISADE * CryptoContextImpl for the BGVrns Scheme with it. * * @param cyclOrder the cyclotomic order M * @param numPrimes the number of towers/primes to use when building the * moduli chain * @param ptm the plaintext modulus * @param mode RLWE or OPTIMIZED * @param depth * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param firstModSize the bit-length of the first modulus * @param dcrtrBits the size of the moduli in bits * @param ksTech key switching technique to use (e.g., GHS or BV) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrnsWithParamsGen( usint cyclOrder, usint numPrimes, usint ptm, usint relinWindow, MODE mode, int depth = 1, int maxDepth = 2, enum KeySwitchTechnique ksTech = BV, usint firstModSize = 0, usint dcrtBits = 0, uint32_t numLargeDigits = 4, usint batchSize = 0, enum ModSwitchMethod msMethod 
= MANUAL); /** * Construct a PALISADE CryptoContextImpl for the BGVrns Scheme. * * @param multiplicativeDepth the depth of multiplications supported by the * scheme (equal to number of towers - 1) * @param ptm the plaintext modulus * @param stdLevel the standard security level we want the scheme to satisfy * @param stdDev sigma - distribution parameter for error distribution * @param maxDepth the maximum power of secret key for which the * relinearization key is generated * @param mode RLWE (gaussian distribution) or OPTIMIZED (ternary * distribution) * @param ksTech key switching technique to use (e.g., HYBRID, GHS or BV) * @param ringDim the ring dimension (if not specified selected automatically * based on stdLevel) * @param numLargeDigits the number of big digits to use in HYBRID key * switching * @param firstModSize the bit-length of the first modulus * @param dcrtrBits the size of the moduli in bits * @param relinWindow the relinearization windows (used in BV key switching, * use 0 for RNS decomposition) * @param batchSize the number of slots being used in the ciphertext * @param msMethod mod switch method * @return new context */ static CryptoContext<Element> genCryptoContextBGVrns( usint multiplicativeDepth, usint ptm, SecurityLevel stdLevel = HEStd_128_classic, float stdDev = 3.19, int maxDepth = 2, MODE mode = OPTIMIZED, enum KeySwitchTechnique ksTech = HYBRID, usint ringDim = 0, uint32_t numLargeDigits = 0, usint firstModSize = 0, usint dcrtBits = 0, usint relinWindow = 0, usint batchSize = 0, enum ModSwitchMethod msMethod = AUTO); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param plaintextModulus plaintext modulus * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param stdDev distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; 
for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param params ring parameters * @param encodingParams plaintext encoding parameters * @param relinWindow bits in the base of digits in key * switching/relinearization * @param stdDev sigma - distribution parameter for error distribution * @param stdDev distribution parameter for secret key distribution * @param depth of supported computation circuit (not used; for future use) * @param assuranceMeasure alpha - effective bound for gaussians: - * sqrt{alpha}*sigma..sqrt{alpha}*sigma * @param security level - root Hermite factor * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld( shared_ptr<ParmType> params, EncodingParams encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param plaintextModulus plaintext modulus * @return */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, const PlaintextModulus ptModulus); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param m cyclotomic order (ring dimension n = m/2 for power-of-two * cyclotomics) * @param encodingParams plaintext encoding parameters * @return */ static CryptoContext<Element> genCryptoContextNull( unsigned int m, EncodingParams encodingParams); static CryptoContext<Element> 
DeserializeAndCreateContext( const Serialized& serObj) __attribute__(( deprecated("serialization changed, see wiki for details"))); }; } // namespace lbcrypto #endif /* SRC_PKE_CRYPTOCONTEXT_H_ */
GB_binop__lt_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// This translation unit provides every kernel specialization for the LT
// (less-than) binary operator on bool inputs.  The numeric bodies come from
// the shared "*_template.c" / "*_meta.c" files included below; this file only
// defines the type/operator macros those templates expand.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lt_bool
// A.*B function (eWiseMult):       GB_AemultB__lt_bool
// A*D function (colscale):         GB_AxD__lt_bool
// D*A function (rowscale):         GB_DxB__lt_bool
// C+=B function (dense accum):     GB_Cdense_accumB__lt_bool
// C+=b function (dense accum):     GB_Cdense_accumb__lt_bool
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lt_bool
// C=scalar+B                       GB_bind1st__lt_bool
// C=scalar+B'                      GB_bind1st_tran__lt_bool
// C=A+scalar                       GB_bind2nd__lt_bool
// C=A'+scalar                      GB_bind2nd_tran__lt_bool

// C type:   bool
// A type:   bool
// B,b type: bool
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    bool bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x < y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// (none) here: there is no CBLAS axpy for a boolean less-than operator.
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags are compile-time opt-outs from GB_control.h; when any
// is set, every function below returns GrB_NO_VALUE so the caller falls back
// to the generic kernel)
#define GB_DISABLE \
    (GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LT is not in that list, so this kernel is not generated for this operator.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slicing of B across tasks (produced by GB_ek_slice):
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lt_bool
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE (generated code): unreachable; the block above already returned.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    // mapping of C's vectors/entries back to M, A, and B:
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lt_bool
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lt_bool
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    bool aij = Ax [pA] ;            \
    Cx [pC] = (x < aij) ;           \
}

GrB_Info GB_bind1st_tran__lt_bool
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    bool aij = Ax [pA] ;            \
    Cx [pC] = (aij < y) ;           \
}

GrB_Info GB_bind2nd_tran__lt_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_uint32_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// Kernels for the LNOT (logical-not) unary operator applied to a bool
// matrix, producing a uint32_t result.  The transpose variant's body comes
// from the shared GB_unaryop_transpose.c template, driven by the macros
// defined below.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint32_bool
// op(A') function:  GB_tran__lnot_uint32_bool

// C type:   uint32_t
// A type:   bool
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (x, aij) ;                       \
    GB_OP (GB_CX (pC), x) ;                     \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* flags are compile-time opt-outs from GB_control.h; when set,
// both functions below return GrB_NO_VALUE and the generic kernel is used)
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint32_bool
(
    uint32_t *restrict Cx,      // output array, anz entries
    const bool *restrict Ax,    // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint32_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__lxor_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_08__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_04__lxor_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_fp64) // A*D function (colscale): GB (_AxD__lxor_fp64) // D*A function (rowscale): GB (_DxB__lxor_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_fp64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_fp64) // C=scalar+B GB (_bind1st__lxor_fp64) // C=scalar+B' GB (_bind1st_tran__lxor_fp64) // C=A+scalar GB (_bind2nd__lxor_fp64) // C=A'+scalar GB (_bind2nd_tran__lxor_fp64) // C type: double // A type: double // A pattern? 0 // B type: double // B pattern? 
0 // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_FP64 || GxB_NO_LXOR_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lxor_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lxor_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = 
//------------------------------------------------------------------------------
// NOTE(review): generated FactoryKernel-style wrappers for the LXOR (logical
// xor) binary operator on double (fp64) values.  Each function casts matrix
// data to double and expands a shared #include'd template; when GB_DISABLE is
// set the whole kernel compiles out and returns GrB_NO_VALUE.
//------------------------------------------------------------------------------

// Tail of a kernel whose signature begins above this chunk: casts C->x,
// expands the column-scale template, and closes the function.
    (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale kernel: the actual loop lives in GB_AxB_rowscale_template.c,
// which reads the typed output pointer Cx set up here.
GrB_Info GB (_DxB__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// eWiseAdd kernel.  alpha/beta scalars are only read in eWiseUnion mode
// (see the is_eWiseUnion guard); the per-entry work is in GB_add_template.c.
GrB_Info GB (_AaddB__lxor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slicing arrays, freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in)) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// The GB_FLIPPED setting selects fmult(y,x) vs fmult(x,y) inside the
// template; it only matters when the operator is non-commutative.
GrB_Info GB (_AemultB_02__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = lxor (x, Bx [p]) over all bnz entries, in parallel.
// GBB (Bb, p) is the entry-present test (bitmap/full case); absent
// entries are skipped.
GrB_Info GB (_bind1st__lxor_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double  x  = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        // logical xor on fp64: nonzero is treated as true
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st with the scalar bound to the second operand.
GrB_Info GB (_bind2nd__lxor_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double  y  = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    double aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = ((x != 0) != (aij != 0)) ;            \
}

GrB_Info GB (_bind1st_tran__lxor_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    double aij = GBX (Ax, pA, false) ;              \
    Cx [pC] = ((aij != 0) != (y != 0)) ;            \
}

GrB_Info GB (_bind2nd_tran__lxor_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 int8 convolution kernel into the 6x6 Winograd-42 domain
// (ktm below is the integer kernel-transform matrix, applied as ktm * g * ktm^T
// per input/output channel pair) and repack the result for the pack8to4 GEMM.
// The four #if blocks at the top dispatch at runtime to a variant object file
// compiled with a stronger ISA when the CPU supports it but this translation
// unit was not built with it.
static void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx512vnni(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif

#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avxvnni(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif

#if NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx2(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_avx2(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif

#if NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_xop(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to4_int8_sse_xop(kernel, kernel_tm_pack8, inch, outch, opt);
        return;
    }
#endif

    // winograd42 transform kernel: 36 shorts per (inch, outch) pair
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // integer kernel-transform matrix (rows of G for F(4,3))
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel: k0/k1/k2 are the three rows of the 3x3 kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h: left-multiply by ktm (per-row transform)
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U: right-multiply by ktm^T, yielding the 6x6 transformed tile
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    kernel_tm_pack8.create(inch / 8, 36, outch / 4, (size_t)2u * 32, 32);

    int q = 0;
    for (; q + 3 < outch; q += 4)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);

        // NOTE(review): shadows the outer kernel_tm on purpose -- this is the
        // packed destination channel for these 4 output channels
        Mat kernel_tm = kernel_tm_pack8.channel(q / 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = kernel_tm.row<short>(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
#if __AVXVNNI__ || __AVX512VNNI__ || __XOP__
                // VNNI/XOP path: interleave pairs of input channels so the
                // dot-product instructions consume adjacent 16-bit pairs
                for (int i = 0; i < 4; i++)
                {
                    const short* k00 = k0.row<const short>(p + i * 2);
                    const short* k10 = k1.row<const short>(p + i * 2);
                    const short* k20 = k2.row<const short>(p + i * 2);
                    const short* k30 = k3.row<const short>(p + i * 2);

                    const short* k01 = k0.row<const short>(p + i * 2 + 1);
                    const short* k11 = k1.row<const short>(p + i * 2 + 1);
                    const short* k21 = k2.row<const short>(p + i * 2 + 1);
                    const short* k31 = k3.row<const short>(p + i * 2 + 1);

                    g00[0] = k00[k];
                    g00[1] = k01[k];
                    g00[2] = k10[k];
                    g00[3] = k11[k];
                    g00[4] = k20[k];
                    g00[5] = k21[k];
                    g00[6] = k30[k];
                    g00[7] = k31[k];

                    g00 += 8;
                }
#else
                // generic path: one input channel per group of 4 outputs
                for (int i = 0; i < 8; i++)
                {
                    const short* k00 = k0.row<const short>(p + i);
                    const short* k10 = k1.row<const short>(p + i);
                    const short* k20 = k2.row<const short>(p + i);
                    const short* k30 = k3.row<const short>(p + i);

                    g00[0] = k00[k];
                    g00[1] = k10[k];
                    g00[2] = k20[k];
                    g00[3] = k30[k];

                    g00 += 4;
                }
#endif
            }
        }
    }
}

// Winograd-42 int8 conv3x3 stride-1 forward pass (pack8 input, pack4 output).
// NOTE(review): this definition continues beyond the visible chunk; only its
// runtime-dispatch prologue appears here and is left untouched.
static void conv3x3s1_winograd42_pack8to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        extern void conv3x3s1_winograd42_pack8to4_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
        conv3x3s1_winograd42_pack8to4_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif

#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        extern void conv3x3s1_winograd42_pack8to4_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
        conv3x3s1_winograd42_pack8to4_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif

#if NCNN_AVX2 && __AVX__ && !__AVX2__
    if (ncnn::cpu_support_x86_avx2())
    {
        extern void conv3x3s1_winograd42_pack8to4_int8_sse_avx2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
        conv3x3s1_winograd42_pack8to4_int8_sse_avx2(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif

#if NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        extern
void conv3x3s1_winograd42_pack8to4_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt); conv3x3s1_winograd42_pack8to4_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt); return; } #endif int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; // size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { // TODO use _mm_cvtepi8_epi16 on sse4.1 __m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0); __m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _r04_05 = 
_mm_loadu_si128((const __m128i*)(r0 + 32)); __m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01); __m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03); __m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05); __m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001); __m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001); __m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203); __m128i _r03 = _mm_unpackhi_epi8(_r02_03, _extr0203); __m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405); __m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405); __m128i _v5 = _mm_set1_epi16(5); __m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5)); __m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2)); __m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2)); __m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp5m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5)); _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); _mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m); _mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]); __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]); __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]); 
__m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]); __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]); __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]); __m128i _v5 = _mm_set1_epi16(5); __m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5)); __m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5)); _mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0); _mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1); _mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2); _mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3); _mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4); _mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __AVX2__ if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, 
opt.workspace_allocator); #else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { short* tmpptr = tm2.row<short>(i / 4); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256i _r0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); _mm256_storeu_si256((__m256i*)tmpptr, _r0); _mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 32; } } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); __m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); _mm_storeu_si128((__m128i*)tmpptr, _r0); _mm_storeu_si128((__m128i*)(tmpptr + 8), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 16; } } for (; i < tiles; i++) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #else short* tmpptr = tm2.row<short>(i / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); _mm_storeu_si128((__m128i*)tmpptr, _r0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * 4, 4, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* output0_tm = 
top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); __m256i _sum4_5 = _mm256_setzero_si256(); __m256i _sum6_7 = _mm256_setzero_si256(); for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef); #else // 0 0 1 1 2 2 3 3 8 8 9 9 a a b b // 4 4 5 5 6 6 7 7 c c d d e e f f __m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0); __m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0); __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, 
_mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val0_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif __m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef); #else __m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1); __m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, 
_val1); __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123); __m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123); __m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab); __m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab); __m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567); __m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567); __m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef); __m256i _sh16_17 = _mm256_mulhi_epi16(_w23, _val1_cdef); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17)); #endif r0 += 32; k0 += 32; } __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); __m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1)); _sum4_6 = 
_mm256_add_epi32(_sum4_6, _sum5_7); _mm256_storeu_si256((__m256i*)output0_tm, _sum0_2); _mm256_storeu_si256((__m256i*)(output0_tm + 8), _sum4_6); output0_tm += 16; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { #if __AVX2__ // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef); #else __m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val); __m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val); __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, 
_mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif #else // 0 1 2 3 4 5 6 7 __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val0_23 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3)); __m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val1_23 = 
_mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1); _sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2); _sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3); _sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val0_67, _w3, _sum1); _sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2); _sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0); __m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0); __m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1); __m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1); __m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123); __m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123); __m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567); __m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567); __m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123); __m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123); __m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567); __m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567); __m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01); __m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01); __m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01); __m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01); __m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23); __m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23); __m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23); __m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23); __m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45); __m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45); __m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45); __m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45); __m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67); __m128i _sh03 = _mm_mulhi_epi16(_w3, 
_val0_67); __m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67); __m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13)); #endif #endif r0 += 16; k0 += 32; } #if __AVX2__ __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); _mm256_storeu_si256((__m256i*)output0_tm, _sum0_2); #else _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _mm_storeu_si128((__m128i*)output0_tm, _sum0); _mm_storeu_si128((__m128i*)(output0_tm + 4), _sum2); #endif output0_tm += 8; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); #else __m128i _sum0 = 
_mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 __m128i _val = _mm_loadu_si128((const __m128i*)r0); #if __AVX2__ __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ // 0 1 0 1 x x x x // 0 1 0 1 0 1 0 1 __m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); __m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1); __m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val)); __m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val)); _val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); _val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); #endif #else __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = 
_mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val23, _w1, _sum1); _sum0 = _mm_maddd_epi16(_val45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val67, _w3, _sum1); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val_0123 = _mm_unpacklo_epi16(_val, _val); __m128i _val_4567 = _mm_unpackhi_epi16(_val, _val); __m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123); __m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123); __m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567); __m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567); __m128i _sl0 = _mm_mullo_epi16(_w0, _val01); __m128i _sh0 = _mm_mulhi_epi16(_w0, _val01); __m128i _sl1 = _mm_mullo_epi16(_w1, _val23); __m128i _sh1 = _mm_mulhi_epi16(_w1, _val23); __m128i _sl2 = _mm_mullo_epi16(_w2, _val45); __m128i _sh2 = _mm_mulhi_epi16(_w2, _val45); __m128i _sl3 = _mm_mullo_epi16(_w3, _val67); __m128i _sh3 = _mm_mulhi_epi16(_w3, _val67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3)); #endif #endif r0 += 8; k0 += 32; } #if __AVX2__ __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1); #endif _sum0 = _mm_add_epi32(_sum0, 
_sum1); _mm_storeu_si128((__m128i*)output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u * 4, 4, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 4; const int* output0_tm_1 = output0_tm_0 + tiles * 4; const int* output0_tm_2 = output0_tm_0 + tiles * 8; const int* output0_tm_3 = output0_tm_0 + tiles * 12; const int* output0_tm_4 = output0_tm_0 + tiles * 16; const int* output0_tm_5 = output0_tm_0 + tiles * 20; int* output0 = out0.row<int>(i * 4) + (j * 4) * 4; // TODO sse optimize for (int m = 0; m < 5; m++) { __m128i _out0tm0 = _mm_loadu_si128((const __m128i*)output0_tm_0); __m128i _out0tm1 = _mm_loadu_si128((const __m128i*)output0_tm_1); __m128i _out0tm2 = _mm_loadu_si128((const __m128i*)output0_tm_2); __m128i _out0tm3 = _mm_loadu_si128((const __m128i*)output0_tm_3); __m128i _out0tm4 = _mm_loadu_si128((const __m128i*)output0_tm_4); __m128i _out0tm5 = _mm_loadu_si128((const __m128i*)output0_tm_5); __m128i _tmp02a = _mm_add_epi32(_out0tm1, 
_out0tm2); __m128i _tmp13a = _mm_sub_epi32(_out0tm1, _out0tm2); __m128i _tmp02b = _mm_add_epi32(_out0tm3, _out0tm4); __m128i _tmp13b = _mm_sub_epi32(_out0tm3, _out0tm4); __m128i _tmp0m = _mm_add_epi32(_mm_add_epi32(_out0tm0, _tmp02a), _tmp02b); __m128i _tmp1m = _mm_add_epi32(_tmp13a, _mm_slli_epi32(_tmp13b, 1)); __m128i _tmp2m = _mm_add_epi32(_tmp02a, _mm_slli_epi32(_tmp02b, 2)); __m128i _tmp3m = _mm_add_epi32(_mm_add_epi32(_tmp13a, _mm_slli_epi32(_out0tm5, 2)), _mm_slli_epi32(_tmp13b, 3)); _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 5; m < 6; m++) { __m128i _out0tm0 = _mm_loadu_si128((const __m128i*)output0_tm_0); __m128i _out0tm1 = _mm_loadu_si128((const __m128i*)output0_tm_1); __m128i _out0tm2 = _mm_loadu_si128((const __m128i*)output0_tm_2); __m128i _out0tm3 = _mm_loadu_si128((const __m128i*)output0_tm_3); __m128i _out0tm4 = _mm_loadu_si128((const __m128i*)output0_tm_4); __m128i _out0tm5 = _mm_loadu_si128((const __m128i*)output0_tm_5); __m128i _tmp02a = _mm_add_epi32(_out0tm1, _out0tm2); __m128i _tmp13a = _mm_sub_epi32(_out0tm1, _out0tm2); __m128i _tmp02b = _mm_add_epi32(_out0tm3, _out0tm4); __m128i _tmp13b = _mm_sub_epi32(_out0tm3, _out0tm4); __m128i _tmp0m = _mm_add_epi32(_mm_add_epi32(_out0tm0, _tmp02a), _tmp02b); __m128i _tmp1m = _mm_add_epi32(_tmp13a, _mm_slli_epi32(_tmp13b, 1)); __m128i _tmp2m = _mm_add_epi32(_tmp02a, _mm_slli_epi32(_tmp02b, 2)); __m128i _tmp3m = _mm_add_epi32(_mm_add_epi32(_tmp13a, _mm_slli_epi32(_out0tm5, 2)), _mm_slli_epi32(_tmp13b, 3)); _tmp0m = _mm_slli_epi32(_tmp0m, 2); _tmp1m = _mm_slli_epi32(_tmp1m, 2); _tmp2m = _mm_slli_epi32(_tmp2m, 2); _tmp3m = _mm_slli_epi32(_tmp3m, 2); _mm_storeu_si128((__m128i*)tmp[0][m], 
_tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]); __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]); __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]); __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]); __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]); __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]); __m128i _tmp02a = _mm_add_epi32(_tmp01, _tmp02); __m128i _tmp13a = _mm_sub_epi32(_tmp01, _tmp02); __m128i _tmp02b = _mm_add_epi32(_tmp03, _tmp04); __m128i _tmp13b = _mm_sub_epi32(_tmp03, _tmp04); __m128i _out00 = _mm_add_epi32(_mm_add_epi32(_tmp00, _tmp02a), _tmp02b); __m128i _out01 = _mm_add_epi32(_tmp13a, _mm_slli_epi32(_tmp13b, 1)); __m128i _out02 = _mm_add_epi32(_tmp02a, _mm_slli_epi32(_tmp02b, 2)); __m128i _out03 = _mm_add_epi32(_mm_add_epi32(_tmp05, _tmp13a), _mm_slli_epi32(_tmp13b, 3)); // TODO use integer trick for division by 576 __m128 _v576 = _mm_set1_ps(1.0 / 576); _out00 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out00), _v576)); _out01 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out01), _v576)); _out02 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out02), _v576)); _out03 = _mm_cvttps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(_out03), _v576)); _mm_storeu_si128((__m128i*)output0, _out00); _mm_storeu_si128((__m128i*)(output0 + 4), _out01); _mm_storeu_si128((__m128i*)(output0 + 8), _out02); _mm_storeu_si128((__m128i*)(output0 + 12), _out03); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
GB_unop__identity_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_uint64_uint64
// op(A') function: GB_unop_tran__identity_uint64_uint64

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// access the pth entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (no typecast needed: A and C are both uint64_t)
#define GB_CAST(z, aij) \
    uint64_t z = aij ;

// cij = op (aij)
// NOTE: this macro is consumed textually by GB_unop_transpose.c (included
// below), so its exact token sequence must match what the generator emits.
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (enables the memcpy fast path in the apply function below)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    1

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the (identity) unary operator to all anz entries of Ax, writing the
// result into Cx.  Ab, if non-NULL, is the bitmap of A: entries with Ab[p]==0
// are not present and are skipped.  Returns GrB_SUCCESS, or GrB_NO_VALUE when
// this operator/type combination is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__identity_uint64_uint64
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint64_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is included
// textually and expands the GB_* macros defined above for this type pair.
GrB_Info GB_unop_tran__identity_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
heat_2d-a.pluto.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Discretized 2D heat equation stencil with non periodic boundary conditions * Adapted from Pochoir test bench */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> /* * N is the number of points * T is the number of timesteps */ #ifdef HAS_DECLS #include "decls.h" #else #define N 16000L #define T 16000L #endif #define NUM_FP_OPS 10 /* Define our arrays */ double A[2][N][N]; double total=0; double sum_err_sqr=0; int chtotal=0; int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; return x->tv_sec < y->tv_sec; } int main(int argc, char * argv[]) { long int t, i, j, k; const int BASE = 1024; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0; printf("Number of points = %ld\t|Number of timesteps = %ld\t", N*N, T); /* Initialization */ srand(42); // seed with a constant value to verify results for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[0][i][j] = 1.0 * (rand() % BASE); } } #ifdef TIME gettimeofday(&start, 0); #endif // #undef N // #define N 8000L #undef T #define T 8000 /* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((N >= 1) && (T >= 1)) { for (t1=-1;t1<=floord(T-1,2);t1++) { lbp=ceild(t1,2); ubp=min(floord(2*T+N-2,8),floord(4*t1+N+2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(0,ceild(t1-255,256));t3<=min(floord(2*T+N-2,1024),floord(4*t1+N+6,1024));t3++) { if ((t1 <= floord(1024*t3-N,4)) && (t2 <= 128*t3-1) && (t3 >= ceild(N,1024))) { if (N%2 == 0) { for (t5=max(max(8*t2,1024*t3-N+1),-8*t1+8*t2+2048*t3-2*N-5);t5<=min(8*t2+7,-8*t1+8*t2+2048*t3-2*N+2);t5++) { A[0][(-1024*t3+t5+N-1)][(N-1)] = 0.125*(((-1024*t3+t5+N-1)-1) < 0 ? 
0: A[1][(-1024*t3+t5+N-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-1024*t3+t5+N-1)][(N-1) + 1]) + 0.125*(((-1024*t3+t5+N-1)+1) >= N ? 0 : A[1][(-1024*t3+t5+N-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-1024*t3+t5+N-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-1024*t3+t5+N-1)][(N-1)];; } } } if ((t1 <= floord(8*t2-N,4)) && (t2 >= ceild(N,8))) { if (N%2 == 0) { for (t6=max(1024*t3,8*t2-N+1);t6<=min(8*t2,1024*t3+1023);t6++) { A[0][(N-1)][(-8*t2+t6+N-1)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-8*t2+t6+N-1)]) + 0.125*(((-8*t2+t6+N-1)+1) >= N ? 0 : A[1][(N-1)][(-8*t2+t6+N-1) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-8*t2+t6+N-1)]) + 0.125*(((-8*t2+t6+N-1)-1) < 0 ? 0 : A[1][(N-1)][(-8*t2+t6+N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-8*t2+t6+N-1)];; } } } if ((N >= 2) && (t1 == 2*t2) && (t1 <= floord(1024*t3-N+1023,4)) && (t1 >= ceild(1024*t3-N+1,4))) { for (t6=max(4*t1,1024*t3);t6<=4*t1+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-4*t1+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-4*t1+t6)]) + 0.125*(((-4*t1+t6)+1) >= N ? 0 : A[0][0][(-4*t1+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-4*t1+t6)]) + 0.125*(((-4*t1+t6)-1) < 0 ? 0 : A[0][0][(-4*t1+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-4*t1+t6)];; } } for (t5=4*t1+1;t5<=4*t1+2;t5++) { for (t6=max(1024*t3,4*t1+1);t6<=4*t1+N;t6++) { if (t1%2 == 0) { A[0][(-4*t1+t5-1)][(-4*t1+t6-1)] = 0.125*(((-4*t1+t5-1)-1) < 0 ? 0: A[1][(-4*t1+t5-1) - 1][(-4*t1+t6-1)]) + 0.125*(((-4*t1+t6-1)+1) >= N ? 0 : A[1][(-4*t1+t5-1)][(-4*t1+t6-1) + 1]) + 0.125*(((-4*t1+t5-1)+1) >= N ? 0 : A[1][(-4*t1+t5-1) + 1][(-4*t1+t6-1)]) + 0.125*(((-4*t1+t6-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-1)][(-4*t1+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-1)][(-4*t1+t6-1)];; } } } } if ((t1 == 2*t2) && (t1 >= ceild(1024*t3-N+1024,4))) { for (t6=max(4*t1,1024*t3);t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][0][(-4*t1+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-4*t1+t6)]) + 0.125*(((-4*t1+t6)+1) >= N ? 
0 : A[0][0][(-4*t1+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-4*t1+t6)]) + 0.125*(((-4*t1+t6)-1) < 0 ? 0 : A[0][0][(-4*t1+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-4*t1+t6)];; } } for (t5=4*t1+1;t5<=4*t1+2;t5++) { for (t6=max(1024*t3,4*t1+1);t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[0][(-4*t1+t5-1)][(-4*t1+t6-1)] = 0.125*(((-4*t1+t5-1)-1) < 0 ? 0: A[1][(-4*t1+t5-1) - 1][(-4*t1+t6-1)]) + 0.125*(((-4*t1+t6-1)+1) >= N ? 0 : A[1][(-4*t1+t5-1)][(-4*t1+t6-1) + 1]) + 0.125*(((-4*t1+t5-1)+1) >= N ? 0 : A[1][(-4*t1+t5-1) + 1][(-4*t1+t6-1)]) + 0.125*(((-4*t1+t6-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-1)][(-4*t1+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-1)][(-4*t1+t6-1)];; } } } } if ((N == 1) && (t1 == 2*t2)) { if (t1%2 == 0) { A[1][0][0] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][0][0 + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][0][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][0];; } if (t1%2 == 0) { A[0][0][0] = 0.125*((0 -1) < 0 ? 0: A[1][0 - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[1][0][0 + 1]) + 0.125*((0 +1) >= N ? 0 : A[1][0 + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[1][0][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][0][0];; } } for (t4=max(max(max(0,ceild(1024*t3-N+1,2)),2*t1),4*t1-4*t2+4);t4<=min(min(min(min(floord(1024*t3-N+1023,2),floord(8*t1-8*t2+N-1,2)),T-1),2*t1+1),512*t3-1);t4++) { for (t5=8*t2;t5<=-8*t1+8*t2+4*t4;t5++) { for (t6=1024*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } for (t5=-8*t1+8*t2+4*t4+1;t5<=min(2*t4+N,-8*t1+8*t2+4*t4+2);t5++) { for (t6=1024*t3;t6<=2*t4+N;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(1024*t3-N+1,2)),2*t1);t4<=min(min(min(min(floord(4*t1+N-5,2),floord(1024*t3-N+1023,2)),T-1),2*t1+1),512*t3-1);t4++) { for (t5=4*t1+4;t5<=-4*t1+4*t4+4;t5++) { for (t6=1024*t3;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } for (t5=-4*t1+4*t4+5;t5<=min(2*t4+N,-4*t1+4*t4+6);t5++) { for (t6=1024*t3;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } } } for (t4=max(max(max(0,ceild(1024*t3-N+1024,2)),2*t1),4*t1-4*t2+4);t4<=min(min(min(floord(8*t1-8*t2+N-1,2),T-1),2*t1+1),512*t3-1);t4++) { for (t5=8*t2;t5<=-8*t1+8*t2+4*t4;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } for (t5=-8*t1+8*t2+4*t4+1;t5<=min(2*t4+N,-8*t1+8*t2+4*t4+2);t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } if (t1 == 2*t2-1) { for (t4=max(max(0,ceild(1024*t3-N+1024,2)),2*t1);t4<=min(min(T-1,2*t1+1),512*t3-1);t4++) { for (t5=4*t1+4;t5<=-4*t1+4*t4+4;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } for (t5=-4*t1+4*t4+5;t5<=-4*t1+4*t4+6;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 
0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } } } if ((N >= 3) && (t1 == 2*t2) && (t1 <= min(floord(T-2,2),floord(1024*t3-N+1021,4))) && (t1 >= 256*t3)) { for (t6=4*t1+2;t6<=4*t1+N+1;t6++) { if (t1%2 == 0) { A[1][0][(-4*t1+t6-2)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][0][(-4*t1+t6-2) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][0][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-4*t1+t6-2)];; } } for (t5=4*t1+3;t5<=4*t1+4;t5++) { if (t1%2 == 0) { A[1][(-4*t1+t5-2)][0] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][0 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-2)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][0];; } for (t6=4*t1+3;t6<=4*t1+N+1;t6++) { if (t1%2 == 0) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; } if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(N-1)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 
0: A[1][(-4*t1+t5-3) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(N-1) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(N-1)];; } } for (t5=4*t1+5;t5<=min(4*t1+6,4*t1+N+2);t5++) { for (t6=4*t1+3;t6<=4*t1+N+2;t6++) { if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(T-2,2),floord(1024*t3-N+1021,4)),256*t3-2)) && (t1 >= ceild(1024*t3-N-1,4))) { for (t6=1024*t3;t6<=4*t1+N+1;t6++) { if (t1%2 == 0) { A[1][0][(-4*t1+t6-2)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][0][(-4*t1+t6-2) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][0][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-4*t1+t6-2)];; } } for (t5=4*t1+3;t5<=4*t1+4;t5++) { for (t6=1024*t3;t6<=4*t1+N+1;t6++) { if (t1%2 == 0) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; } if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 
0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(N-1)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(N-1) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(N-1)];; } } for (t5=4*t1+5;t5<=4*t1+6;t5++) { for (t6=1024*t3;t6<=4*t1+N+2;t6++) { if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } } if (t1 == 2*t2) { for (t4=max(ceild(1024*t3-N+1,2),2*t1+2);t4<=min(min(min(floord(1024*t3-N+1023,2),T-1),2*t1+3),512*t3-1);t4++) { for (t6=1024*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][0][(-2*t4+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-2*t4+t6)];; } } for (t5=2*t4+1;t5<=4*t1+7;t5++) { for (t6=1024*t3;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } } } if ((t1 == 2*t2) && (t1 <= floord(T-2,2)) && (t1 >= max(ceild(1024*t3-N+1022,4),256*t3))) { for (t6=4*t1+2;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][0][(-4*t1+t6-2)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][0][(-4*t1+t6-2) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][0][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-4*t1+t6-2)];; } } for (t5=4*t1+3;t5<=4*t1+4;t5++) { if (t1%2 == 0) { A[1][(-4*t1+t5-2)][0] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][0 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-2)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][0];; } for (t6=4*t1+3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 
0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; } if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } for (t5=4*t1+5;t5<=4*t1+6;t5++) { for (t6=4*t1+3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } } if ((t1 == 2*t2) && (t1 <= min(floord(T-2,2),256*t3-2)) && (t1 >= ceild(1024*t3-N+1022,4))) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][0][(-4*t1+t6-2)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][0][(-4*t1+t6-2) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][0][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-4*t1+t6-2)];; } } for (t5=4*t1+3;t5<=4*t1+4;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 
0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; } if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } for (t5=4*t1+5;t5<=4*t1+6;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } } if (t1 == 2*t2) { for (t4=max(ceild(1024*t3-N+1024,2),2*t1+2);t4<=min(min(T-1,2*t1+3),512*t3-1);t4++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][0][(-2*t4+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-2*t4+t6)];; } } for (t5=2*t4+1;t5<=4*t1+7;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } } } if ((N == 1) && (t1 == 2*t2)) { for (t4=2*t1+1;t4<=min(T-1,2*t1+3);t4++) { if (t1%2 == 0) { A[1][0][0] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][0][0 + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][0][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][0];; } if (t1%2 == 0) { A[0][0][0] = 0.125*((0 -1) < 0 ? 0: A[1][0 - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[1][0][0 + 1]) + 0.125*((0 +1) >= N ? 0 : A[1][0 + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[1][0][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][0][0];; } } } for (t4=max(max(2*t1,512*t3),4*t1-4*t2+4);t4<=min(min(min(floord(1024*t3-N+1023,2),floord(8*t1-8*t2+N-1,2)),T-1),2*t1+1);t4++) { for (t5=8*t2;t5<=-8*t1+8*t2+4*t4;t5++) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 
0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } for (t5=-8*t1+8*t2+4*t4+1;t5<=min(2*t4+N,-8*t1+8*t2+4*t4+2);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } if (t1 == 2*t2-1) { for (t4=max(2*t1,512*t3);t4<=min(min(min(floord(4*t1+N-5,2),floord(1024*t3-N+1023,2)),T-1),2*t1+1);t4++) { for (t5=4*t1+4;t5<=-4*t1+4*t4+4;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } for (t5=-4*t1+4*t4+5;t5<=min(2*t4+N,-4*t1+4*t4+6);t5++) { for (t6=2*t4+1;t6<=2*t4+N;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } } } for (t4=max(max(max(ceild(1024*t3-N+1024,2),2*t1),512*t3),4*t1-4*t2+4);t4<=min(min(floord(8*t1-8*t2+N-1,2),T-1),2*t1+1);t4++) { for (t5=8*t2;t5<=-8*t1+8*t2+4*t4;t5++) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; for (t6=2*t4+1;t6<=1024*t3+1023;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 
0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } for (t5=-8*t1+8*t2+4*t4+1;t5<=min(2*t4+N,-8*t1+8*t2+4*t4+2);t5++) { for (t6=2*t4+1;t6<=1024*t3+1023;t6++) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } if (t1 == 2*t2-1) { for (t4=max(max(ceild(1024*t3-N+1024,2),2*t1),512*t3);t4<=min(min(floord(4*t1+N-5,2),T-1),2*t1+1);t4++) { for (t5=4*t1+4;t5<=-4*t1+4*t4+4;t5++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; } for (t6=2*t4+1;t6<=1024*t3+1023;t6++) { if ((t1+1)%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 
0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } for (t5=-4*t1+4*t4+5;t5<=min(2*t4+N,-4*t1+4*t4+6);t5++) { for (t6=2*t4+1;t6<=1024*t3+1023;t6++) { if ((t1+1)%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } } } if ((t1 <= min(min(min(min(floord(T-2,2),floord(8*t2-N+2,4)),floord(1024*t3-N+1021,4)),2*t2-2),256*t3-1)) && (t1 >= max(max(0,ceild(8*t2-N-1,4)),ceild(1024*t3-N-1,4)))) { for (t5=8*t2;t5<=4*t1+N+1;t5++) { for (t6=1024*t3;t6<=4*t1+N+1;t6++) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 
0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } A[0][(-4*t1+t5-3)][(N-1)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(N-1) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(N-1)];; } for (t6=1024*t3;t6<=4*t1+N+2;t6++) { A[0][(N-1)][(-4*t1+t6-3)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-3) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-3)];; } } if ((N >= 3) && (N <= 6) && (t1 == 2*t2-1) && (t1 == 256*t3-1) && (t1 >= 255) && (t1 <= floord(T-2,2))) { for (t5=4*t1+4;t5<=4*t1+N+1;t5++) { for (t6=4*t1+4;t6<=4*t1+N+1;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; } } if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 
0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } } if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[0][(-4*t1+t5-3)][(N-1)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(N-1) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(N-1)];; } } } for (t6=4*t1+4;t6<=4*t1+N+2;t6++) { if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[0][(N-1)][(-4*t1+t6-3)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-3) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-3)];; } } } } if ((t1 <= min(min(floord(T-2,2),floord(8*t2-N+2,4)),256*t3-1)) && (t1 >= max(max(0,ceild(8*t2-N-1,4)),ceild(1024*t3-N+1022,4)))) { for (t5=8*t2;t5<=4*t1+N+1;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 
0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[0][(N-1)][(-4*t1+t6-3)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-3) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-3)];; } } if ((t1 <= min(min(min(floord(T-3,2),floord(8*t2-N+3,4)),floord(1024*t3-N+1019,4)),256*t3-2)) && (t1 >= max(ceild(8*t2-N,4),ceild(1024*t3-N-3,4)))) { for (t5=8*t2+1;t5<=8*t2+2;t5++) { for (t6=1024*t3;t6<=4*t1+N+3;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; } } for (t5=8*t2+3;t5<=4*t1+N+3;t5++) { for (t6=1024*t3;t6<=4*t1+N+3;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; A[0][(-4*t1+t5-5)][(-4*t1+t6-5)] = 0.125*(((-4*t1+t5-5)-1) < 0 ? 0: A[1][(-4*t1+t5-5) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) + 1]) + 0.125*(((-4*t1+t5-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 
0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-5)][(-4*t1+t6-5)];; } A[0][(-4*t1+t5-5)][(N-1)] = 0.125*(((-4*t1+t5-5)-1) < 0 ? 0: A[1][(-4*t1+t5-5) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-5)][(N-1) + 1]) + 0.125*(((-4*t1+t5-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-5)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-5)][(N-1)];; } for (t6=1024*t3;t6<=4*t1+N+4;t6++) { A[0][(N-1)][(-4*t1+t6-5)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-5) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-5)];; } } for (t4=max(max(max(ceild(8*t2-N+8,2),ceild(1024*t3-N+1,2)),2*t1+2),4*t1-4*t2+4);t4<=min(min(min(floord(1024*t3-N+1023,2),T-1),2*t1+3),512*t3-1);t4++) { for (t5=-8*t1+8*t2+4*t4-7;t5<=-8*t1+8*t2+4*t4-6;t5++) { for (t6=1024*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } } for (t5=-8*t1+8*t2+4*t4-5;t5<=8*t2+7;t5++) { for (t6=1024*t3;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 
0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } if (t1 <= min(min(min(floord(8*t2-N+1,4),floord(4*t2+512*t3-N+509,4)),floord(8*t2+2*T-N-7,8)),floord(8*t2+1024*t3-N-7,8))) { if ((N+1)%2 == 0) { for (t5=8*t1-8*t2+2*N+3;t5<=8*t1-8*t2+2*N+4;t5++) { for (t6=1024*t3;t6<=8*t1-8*t2+2*N+4;t6++) { A[1][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)] = 0.125*(((-8*t1+8*t2+t5-N-5)-1) < 0 ? 0: A[0][(-8*t1+8*t2+t5-N-5) - 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)+1) >= N ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) + 1]) + 0.125*(((-8*t1+8*t2+t5-N-5)+1) >= N ? 0 : A[0][(-8*t1+8*t2+t5-N-5) + 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)-1) < 0 ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)];; } } for (t6=1024*t3;t6<=8*t1-8*t2+2*N+5;t6++) { A[0][(N-1)][(-8*t1+8*t2+t6-N-6)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)+1) >= N ? 0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)-1) < 0 ? 
0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-8*t1+8*t2+t6-N-6)];; } } } if ((t1 <= min(min(floord(T-3,2),floord(8*t2-N+3,4)),256*t3-2)) && (t1 >= max(ceild(8*t2-N,4),ceild(1024*t3-N+1020,4)))) { for (t5=8*t2+1;t5<=8*t2+2;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; } } for (t5=8*t2+3;t5<=4*t1+N+3;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; A[0][(-4*t1+t5-5)][(-4*t1+t6-5)] = 0.125*(((-4*t1+t5-5)-1) < 0 ? 0: A[1][(-4*t1+t5-5) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) + 1]) + 0.125*(((-4*t1+t5-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-5)][(-4*t1+t6-5)];; } } for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[0][(N-1)][(-4*t1+t6-5)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-5) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 
0 : A[1][(N-1)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-5)];; } } for (t4=max(max(max(ceild(8*t2-N+8,2),ceild(1024*t3-N+1024,2)),2*t1+2),4*t1-4*t2+4);t4<=min(min(T-1,2*t1+3),512*t3-1);t4++) { for (t5=-8*t1+8*t2+4*t4-7;t5<=-8*t1+8*t2+4*t4-6;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } } for (t5=-8*t1+8*t2+4*t4-5;t5<=8*t2+7;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } if ((t1 <= min(min(floord(8*t2-N+1,4),floord(8*t2+2*T-N-7,8)),floord(8*t2+1024*t3-N-7,8))) && (t1 >= ceild(4*t2+512*t3-N+511,4))) { if ((N+1)%2 == 0) { for (t5=8*t1-8*t2+2*N+3;t5<=8*t1-8*t2+2*N+4;t5++) { for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[1][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)] = 0.125*(((-8*t1+8*t2+t5-N-5)-1) < 0 ? 0: A[0][(-8*t1+8*t2+t5-N-5) - 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)+1) >= N ? 
0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) + 1]) + 0.125*(((-8*t1+8*t2+t5-N-5)+1) >= N ? 0 : A[0][(-8*t1+8*t2+t5-N-5) + 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)-1) < 0 ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)];; } } for (t6=1024*t3;t6<=1024*t3+1023;t6++) { A[0][(N-1)][(-8*t1+8*t2+t6-N-6)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)+1) >= N ? 0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)-1) < 0 ? 0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-8*t1+8*t2+t6-N-6)];; } } } if ((N >= 2) && (t1 == 2*t2)) { for (t4=max(ceild(4*t1+N,2),512*t3);t4<=min(floord(4*t1-N+7,2),T-1);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][0][(-2*t4+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-2*t4+t6)];; } } for (t5=2*t4+1;t5<=2*t4+N-1;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } for (t6=2*t4+1;t6<=2*t4+N;t6++) { if (t1%2 == 0) { A[0][(N-1)][(-2*t4+t6-1)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(N-1)][(-2*t4+t6-1) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(N-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-2*t4+t6-1)];; } } } } if (t1 == 2*t2) { for (t4=max(512*t3,2*t1+2);t4<=min(min(min(floord(4*t1+N-1,2),floord(1024*t3-N+1023,2)),T-1),2*t1+3);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][0][(-2*t4+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-2*t4+t6)];; } } for (t5=2*t4+1;t5<=4*t1+7;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 
0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } } } if ((t1 <= min(min(min(floord(T-2,2),floord(8*t2-N+2,4)),floord(1024*t3-N+1021,4)),2*t2-2)) && (t1 >= max(ceild(8*t2-N-1,4),256*t3))) { for (t5=8*t2;t5<=4*t1+N+1;t5++) { A[1][(-4*t1+t5-2)][0] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][0 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-2)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][0];; for (t6=4*t1+3;t6<=4*t1+N+1;t6++) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 
0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } A[0][(-4*t1+t5-3)][(N-1)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(N-1) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(N-1)];; } for (t6=4*t1+3;t6<=4*t1+N+2;t6++) { A[0][(N-1)][(-4*t1+t6-3)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-3) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-3)];; } } if ((N >= 3) && (N <= 6) && (t1 == 2*t2-1) && (t1 <= min(floord(T-2,2),floord(1024*t3-N+1021,4))) && (t1 >= 256*t3+1)) { for (t5=4*t1+4;t5<=4*t1+N+1;t5++) { if ((t1+1)%2 == 0) { A[1][(-4*t1+t5-2)][0] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][0 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-2)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][0];; } for (t6=4*t1+3;t6<=4*t1+N+1;t6++) { if ((t1+1)%2 == 0) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 
0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; } if ((t1+1)%2 == 0) { A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } if ((t1+1)%2 == 0) { A[0][(-4*t1+t5-3)][(N-1)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(N-1) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(N-1)];; } } for (t6=4*t1+3;t6<=4*t1+N+2;t6++) { if ((t1+1)%2 == 0) { A[0][(N-1)][(-4*t1+t6-3)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-3) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-3)];; } } } if ((t1 <= min(min(floord(T-2,2),floord(8*t2-N+2,4)),2*t2-2)) && (t1 >= max(max(ceild(8*t2-N-1,4),ceild(1024*t3-N+1022,4)),256*t3))) { for (t5=8*t2;t5<=4*t1+N+1;t5++) { A[1][(-4*t1+t5-2)][0] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][0 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][0]) + 0.125*((0 -1) < 0 ? 
0 : A[0][(-4*t1+t5-2)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][0];; for (t6=4*t1+3;t6<=1024*t3+1023;t6++) { A[1][(-4*t1+t5-2)][(-4*t1+t6-2)] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][(-4*t1+t6-2)]) + 0.125*(((-4*t1+t6-2)-1) < 0 ? 0 : A[0][(-4*t1+t5-2)][(-4*t1+t6-2) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][(-4*t1+t6-2)];; A[0][(-4*t1+t5-3)][(-4*t1+t6-3)] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(-4*t1+t5-3)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][(-4*t1+t6-3)];; } } for (t6=4*t1+3;t6<=1024*t3+1023;t6++) { A[0][(N-1)][(-4*t1+t6-3)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-3) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-3)]) + 0.125*(((-4*t1+t6-3)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-3) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-3)];; } } if ((N >= 3) && (N <= 6) && (t1 == 2*t2-1) && (t1 == 256*t3+255) && (t1 <= floord(T-2,2))) { for (t5=4*t1+4;t5<=4*t1+N+1;t5++) { if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[1][(-4*t1+t5-2)][0] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][0 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 0 : A[0][(-4*t1+t5-2) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-2)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][0];; } } if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[1][(-4*t1+t5-2)][1] = 0.125*(((-4*t1+t5-2)-1) < 0 ? 0: A[0][(-4*t1+t5-2) - 1][1]) + 0.125*((1 +1) >= N ? 0 : A[0][(-4*t1+t5-2)][1 + 1]) + 0.125*(((-4*t1+t5-2)+1) >= N ? 
0 : A[0][(-4*t1+t5-2) + 1][1]) + 0.125*((1 -1) < 0 ? 0 : A[0][(-4*t1+t5-2)][1 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-2)][1];; } } if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[0][(-4*t1+t5-3)][0] = 0.125*(((-4*t1+t5-3)-1) < 0 ? 0: A[1][(-4*t1+t5-3) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[1][(-4*t1+t5-3)][0 + 1]) + 0.125*(((-4*t1+t5-3)+1) >= N ? 0 : A[1][(-4*t1+t5-3) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[1][(-4*t1+t5-3)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-3)][0];; } } } if ((t1+1)%2 == 0) { if ((t1+1)%256 == 0) { A[0][(N-1)][0] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[1][(N-1)][0 + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[1][(N-1)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][0];; } } } if ((t1 <= min(min(min(floord(T-3,2),floord(8*t2-N+3,4)),floord(1024*t3-N+1019,4)),2*t2-1)) && (t1 >= max(ceild(8*t2-N,4),256*t3-1))) { for (t5=8*t2+1;t5<=8*t2+2;t5++) { for (t6=4*t1+4;t6<=4*t1+N+3;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; } } for (t5=8*t2+3;t5<=4*t1+N+3;t5++) { A[1][(-4*t1+t5-4)][0] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-4)][0 + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-4)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][0];; for (t6=4*t1+5;t6<=4*t1+N+3;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 
0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; A[0][(-4*t1+t5-5)][(-4*t1+t6-5)] = 0.125*(((-4*t1+t5-5)-1) < 0 ? 0: A[1][(-4*t1+t5-5) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) + 1]) + 0.125*(((-4*t1+t5-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-5)][(-4*t1+t6-5)];; } A[0][(-4*t1+t5-5)][(N-1)] = 0.125*(((-4*t1+t5-5)-1) < 0 ? 0: A[1][(-4*t1+t5-5) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-4*t1+t5-5)][(N-1) + 1]) + 0.125*(((-4*t1+t5-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-4*t1+t5-5)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-5)][(N-1)];; } for (t6=4*t1+5;t6<=4*t1+N+4;t6++) { A[0][(N-1)][(-4*t1+t6-5)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-5) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-5)];; } } for (t4=max(max(max(ceild(8*t2-N+8,2),512*t3),2*t1+2),4*t1-4*t2+4);t4<=min(min(floord(1024*t3-N+1023,2),T-1),2*t1+3);t4++) { for (t5=-8*t1+8*t2+4*t4-7;t5<=-8*t1+8*t2+4*t4-6;t5++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } } for (t5=-8*t1+8*t2+4*t4-5;t5<=8*t2+7;t5++) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 
0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } if ((N >= 3) && (t1 <= min(min(floord(8*t2-N+1,4),floord(4*t2+512*t3-N+509,4)),floord(8*t2+2*T-N-7,8))) && (t1 >= ceild(8*t2+1024*t3-N-5,8))) { if ((N+1)%2 == 0) { for (t5=8*t1-8*t2+2*N+3;t5<=8*t1-8*t2+2*N+4;t5++) { for (t6=8*t1-8*t2+N+5;t6<=8*t1-8*t2+2*N+4;t6++) { A[1][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)] = 0.125*(((-8*t1+8*t2+t5-N-5)-1) < 0 ? 0: A[0][(-8*t1+8*t2+t5-N-5) - 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)+1) >= N ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) + 1]) + 0.125*(((-8*t1+8*t2+t5-N-5)+1) >= N ? 
0 : A[0][(-8*t1+8*t2+t5-N-5) + 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)-1) < 0 ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)];; } } for (t6=8*t1-8*t2+N+6;t6<=8*t1-8*t2+2*N+5;t6++) { A[0][(N-1)][(-8*t1+8*t2+t6-N-6)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)+1) >= N ? 0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)-1) < 0 ? 0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-8*t1+8*t2+t6-N-6)];; } } } if ((t1 <= min(min(floord(T-3,2),floord(8*t2-N+3,4)),256*t3+254)) && (t1 >= max(max(ceild(8*t2-N,4),ceild(1024*t3-N+1020,4)),256*t3-1))) { for (t5=8*t2+1;t5<=8*t2+2;t5++) { for (t6=4*t1+4;t6<=1024*t3+1023;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; } } for (t5=8*t2+3;t5<=4*t1+N+3;t5++) { A[1][(-4*t1+t5-4)][0] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-4*t1+t5-4)][0 + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-4*t1+t5-4)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][0];; for (t6=4*t1+5;t6<=1024*t3+1023;t6++) { A[1][(-4*t1+t5-4)][(-4*t1+t6-4)] = 0.125*(((-4*t1+t5-4)-1) < 0 ? 0: A[0][(-4*t1+t5-4) - 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) + 1]) + 0.125*(((-4*t1+t5-4)+1) >= N ? 0 : A[0][(-4*t1+t5-4) + 1][(-4*t1+t6-4)]) + 0.125*(((-4*t1+t6-4)-1) < 0 ? 
0 : A[0][(-4*t1+t5-4)][(-4*t1+t6-4) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-4*t1+t5-4)][(-4*t1+t6-4)];; A[0][(-4*t1+t5-5)][(-4*t1+t6-5)] = 0.125*(((-4*t1+t5-5)-1) < 0 ? 0: A[1][(-4*t1+t5-5) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) + 1]) + 0.125*(((-4*t1+t5-5)+1) >= N ? 0 : A[1][(-4*t1+t5-5) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 0 : A[1][(-4*t1+t5-5)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-4*t1+t5-5)][(-4*t1+t6-5)];; } } for (t6=4*t1+5;t6<=1024*t3+1023;t6++) { A[0][(N-1)][(-4*t1+t6-5)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)+1) >= N ? 0 : A[1][(N-1)][(-4*t1+t6-5) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-4*t1+t6-5)]) + 0.125*(((-4*t1+t6-5)-1) < 0 ? 0 : A[1][(N-1)][(-4*t1+t6-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-4*t1+t6-5)];; } } for (t4=max(max(max(max(ceild(8*t2-N+8,2),ceild(1024*t3-N+1024,2)),512*t3),2*t1+2),4*t1-4*t2+4);t4<=min(min(T-1,2*t1+3),512*t3+511);t4++) { for (t5=-8*t1+8*t2+4*t4-7;t5<=-8*t1+8*t2+4*t4-6;t5++) { for (t6=2*t4;t6<=1024*t3+1023;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } } for (t5=-8*t1+8*t2+4*t4-5;t5<=8*t2+7;t5++) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; for (t6=2*t4+1;t6<=1024*t3+1023;t6++) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 
0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } if ((t1 <= min(min(floord(8*t2-N+1,4),floord(8*t2+2*T-N-7,8)),floord(8*t2+1024*t3-N+1017,8))) && (t1 >= max(ceild(4*t2+512*t3-N+511,4),ceild(8*t2+1024*t3-N-5,8)))) { if ((N+1)%2 == 0) { for (t5=8*t1-8*t2+2*N+3;t5<=8*t1-8*t2+2*N+4;t5++) { for (t6=8*t1-8*t2+N+5;t6<=1024*t3+1023;t6++) { A[1][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)] = 0.125*(((-8*t1+8*t2+t5-N-5)-1) < 0 ? 0: A[0][(-8*t1+8*t2+t5-N-5) - 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)+1) >= N ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) + 1]) + 0.125*(((-8*t1+8*t2+t5-N-5)+1) >= N ? 0 : A[0][(-8*t1+8*t2+t5-N-5) + 1][(-8*t1+8*t2+t6-N-5)]) + 0.125*(((-8*t1+8*t2+t6-N-5)-1) < 0 ? 0 : A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-8*t1+8*t2+t5-N-5)][(-8*t1+8*t2+t6-N-5)];; } } for (t6=8*t1-8*t2+N+6;t6<=1024*t3+1023;t6++) { A[0][(N-1)][(-8*t1+8*t2+t6-N-6)] = 0.125*(((N-1)-1) < 0 ? 0: A[1][(N-1) - 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)+1) >= N ? 0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(N-1) + 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)-1) < 0 ? 
0 : A[1][(N-1)][(-8*t1+8*t2+t6-N-6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(N-1)][(-8*t1+8*t2+t6-N-6)];; } } } if (t1 == 2*t2) { for (t4=max(max(ceild(4*t1+N,2),ceild(4*t1-N+8,2)),512*t3);t4<=min(min(floord(1024*t3-N+1023,2),T-1),2*t1+3);t4++) { for (t6=2*t4;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][0][(-2*t4+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-2*t4+t6)];; } } for (t5=2*t4+1;t5<=4*t1+7;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; } for (t6=2*t4+1;t6<=2*t4+N-1;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(N-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(N-1)]) + 0.125*(((N-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(N-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 
0 : A[1][(-2*t4+t5-1) + 1][(N-1)]) + 0.125*(((N-1)-1) < 0 ? 0 : A[1][(-2*t4+t5-1)][(N-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(N-1)];; } } } } if (t1 == 2*t2) { for (t4=max(max(ceild(1024*t3-N+1024,2),512*t3),2*t1+2);t4<=min(T-1,2*t1+3);t4++) { for (t6=2*t4;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][0][(-2*t4+t6)] = 0.125*((0 -1) < 0 ? 0: A[0][0 - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][0][(-2*t4+t6) + 1]) + 0.125*((0 +1) >= N ? 0 : A[0][0 + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][0][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][0][(-2*t4+t6)];; } } for (t5=2*t4+1;t5<=4*t1+7;t5++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][0] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][0]) + 0.125*((0 +1) >= N ? 0 : A[0][(-2*t4+t5)][0 + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][0]) + 0.125*((0 -1) < 0 ? 0 : A[0][(-2*t4+t5)][0 - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][0];; } for (t6=2*t4+1;t6<=1024*t3+1023;t6++) { if (t1%2 == 0) { A[1][(-2*t4+t5)][(-2*t4+t6)] = 0.125*(((-2*t4+t5)-1) < 0 ? 0: A[0][(-2*t4+t5) - 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)+1) >= N ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) + 1]) + 0.125*(((-2*t4+t5)+1) >= N ? 0 : A[0][(-2*t4+t5) + 1][(-2*t4+t6)]) + 0.125*(((-2*t4+t6)-1) < 0 ? 0 : A[0][(-2*t4+t5)][(-2*t4+t6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(-2*t4+t5)][(-2*t4+t6)];; } if (t1%2 == 0) { A[0][(-2*t4+t5-1)][(-2*t4+t6-1)] = 0.125*(((-2*t4+t5-1)-1) < 0 ? 0: A[1][(-2*t4+t5-1) - 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) + 1]) + 0.125*(((-2*t4+t5-1)+1) >= N ? 0 : A[1][(-2*t4+t5-1) + 1][(-2*t4+t6-1)]) + 0.125*(((-2*t4+t6-1)-1) < 0 ? 
0 : A[1][(-2*t4+t5-1)][(-2*t4+t6-1) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[1][(-2*t4+t5-1)][(-2*t4+t6-1)];; } } } } } if (t1 <= min(min(floord(8*t2-N,4),floord(8*t2+2*T-N-8,8)),floord(8*t2+1024*t3-N+1016,8))) { if (N%2 == 0) { for (t6=max(1024*t3,8*t1-8*t2+N+6);t6<=min(1024*t3+1023,8*t1-8*t2+2*N+5);t6++) { A[1][(N-1)][(-8*t1+8*t2+t6-N-6)] = 0.125*(((N-1)-1) < 0 ? 0: A[0][(N-1) - 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)+1) >= N ? 0 : A[0][(N-1)][(-8*t1+8*t2+t6-N-6) + 1]) + 0.125*(((N-1)+1) >= N ? 0 : A[0][(N-1) + 1][(-8*t1+8*t2+t6-N-6)]) + 0.125*(((-8*t1+8*t2+t6-N-6)-1) < 0 ? 0 : A[0][(N-1)][(-8*t1+8*t2+t6-N-6) - 1]) + (-2.0*(0.125*2.0) + 1.0)*A[0][(N-1)][(-8*t1+8*t2+t6-N-6)];; } } } } } } } /* End of CLooG code */ #undef T #define T 16000 // #undef N // #define N 16000L #ifdef TIME gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); printf("|Time taken = %7.5lfs\n", tdiff ); printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N *N * T) / tdiff) / 1000000L)); #endif #ifdef VERIFY for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { total+= A[T%2][i][j] ; } } printf("|sum: %e\t", total); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { sum_err_sqr += (A[T%2][i][j] - (total/N))*(A[T%2][i][j] - (total/N)); } } printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { chtotal += ((char *)A[T%2][i])[j]; } } printf("|sum(rep(A)) = %d\n", chtotal); #endif return 0; } // icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm // /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/ // /* @ begin PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/ // /* @ end @*/
task-taskwait-nested.c
/* * task-taskwait-nested.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task { #pragma omp task shared(var, a) { OMPT_SIGNAL(a); delay(100); var++; } #pragma omp taskwait } // Give other thread time to steal the task and execute its child. OMPT_WAIT(a, 1); #pragma omp taskwait var++; } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
task-taskgroup-unrelated.c
/* * task-taskgroup-unrelated.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include "ompt/ompt-signal.h" #include <omp.h> #include <stdio.h> #include <unistd.h> int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) #pragma omp master { #pragma omp task shared(var, a) { var++; OMPT_SIGNAL(a); // Give master thread time to execute the task in the taskgroup. OMPT_WAIT(a, 2); } #pragma omp taskgroup { #pragma omp task if (0) { // Dummy task. } // Give other threads time to steal the tasks. OMPT_WAIT(a, 1); OMPT_SIGNAL(a); } var++; } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:47 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:29 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
convdw3x3s2_sse.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void convdw3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = w - 2*outw + w; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g=0; g<group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g*9; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain>0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } }
questao02.c
#include <stdio.h>
#include <stdlib.h>
#include "omp.h"

/*
 * Reads a thread count from stdin, spawns that many OpenMP threads, and has
 * each thread announce itself. One thread (single) prints the opening
 * message, all threads synchronize at a barrier, and the master thread
 * prints the closing message.
 *
 * Returns 0 on success, 1 on invalid input.
 */
int main() {
    int num_threads = 0;

    printf("\nquantidade de threads: ");
    /* Bug fix: the scanf result was previously ignored. On non-numeric
     * input, num_threads stayed uninitialized and the following comparison
     * was undefined behavior. Treat a failed read like an invalid count. */
    if (scanf("%d", &num_threads) != 1 || num_threads < 1) {
        printf("numero invalido");
        return 1;
    }

    omp_set_num_threads(num_threads);

#pragma omp parallel
    {
        /* Exactly one (arbitrary) thread prints the banner. */
#pragma omp single
        {
            printf("iniciando o mundo paralelo");
        }

        int thread_num = omp_get_thread_num();
        printf("\neu sou a thread %d", thread_num);

        /* Wait for every thread's greeting before the closing message. */
#pragma omp barrier

        /* Only the master (thread 0) prints the end marker. */
#pragma omp master
        {
            printf("\nFIM");
        }
    }

    return 0;
}
snoop.c
/* * compute the duplex structure of two RNA strands, * allowing only inter-strand base pairs. * see cofold() for computing hybrid structures without * restriction. * * Ivo Hofacker * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/snofold.h" #include "ViennaRNA/pair_mat.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/snoop.h" #include "ViennaRNA/plotting/probabilities.h" #include "ViennaRNA/plotting/structures.h" /* #include "ViennaRNA/fold.h" */ #include "ViennaRNA/duplex.h" #include "ViennaRNA/loops/all.h" #define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */ #define NEW_NINIO 1 /* new asymetry penalty */ PRIVATE void encode_seqs(const char *s1, const char *s2); PRIVATE short * encode_seq(const char *seq); PRIVATE void find_max_snoop(const char *s1, const char *s2, const int max, const int alignment_length, const int *position, const int delta, const int distance, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const char *name, const int fullStemEnergy); PRIVATE void find_max_snoop_XS(const char *s1, const char *s2, const int **access_s1, const int max, const int alignment_length, const int *position, const int *position_j, const int delta, const int distance, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, 
const int min_s1, const int max_s1, const int min_d1, const int min_d2, const char *name, const int fullStemEnergy); PRIVATE char * alisnoop_backtrack(int i, int j, const char **s2, int *Duplex_El, int *Duplex_Er, int *Loop_E, int *Loop_D, int *u, int *pscd, int *psct, int *pscg, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const short **S1, const short **S2); PRIVATE char * snoop_backtrack(int i, int j, const char *s2, int *Duplex_El, int *Duplex_Er, int *Loop_E, int *Loop_D, int *u, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2); PRIVATE char * snoop_backtrack_XS(int i, int j, const char *s2, int *Duplex_El, int *Duplex_Er, int *Loop_E, int *Loop_D, int *u, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2); PRIVATE int compare(const void *sub1, const void *sub2); PRIVATE int covscore(const int *types, int n_seq); PRIVATE short * aliencode_seq(const char *sequence); PUBLIC int snoop_subopt_sorted = 0; /* from subopt.c, default 0 */ /*@unused@*/ #define MAXLOOP_L 3 #define MIN2(A, B) ((A) < (B) ? (A) : (B)) #define MAX2(A, B) ((A) > (B) ? 
(A) : (B)) #define ASS 1 PRIVATE vrna_param_t *P = NULL; PRIVATE int **c = NULL; /* energy array, given that i-j pair */ PRIVATE int **r = NULL; PRIVATE int **lc = NULL; /* energy array, given that i-j pair */ PRIVATE int **lr = NULL; PRIVATE int **c_fill = NULL; PRIVATE int **r_fill = NULL; PRIVATE int **lpair = NULL; PRIVATE short *S1 = NULL, *SS1 = NULL, *S2 = NULL, *SS2 = NULL; PRIVATE short *S1_fill = NULL, *SS1_fill = NULL, *S2_fill = NULL, *SS2_fill = NULL; PRIVATE int n1, n2; /* sequence lengths */ extern int cut_point; PRIVATE int delay_free = 0; /*--------------------------------------------------------------------------*/ snoopT alisnoopfold(const char **s1, const char **s2, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2) { int s, n_seq; int i, j, E, l1, Emin = INF, i_min = 0, j_min = 0; char *struc; snoopT mfe; int *indx; int *mLoop; int *cLoop; folden **foldlist; folden **foldlist_XS; int Duplex_El, Duplex_Er, pscd, psct, pscg; int Loop_D; int u; int Loop_E; short **Sali1, **Sali2; int *type, *type2, *type3; vrna_md_t md; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0; Loop_D = 0; pscd = 0; psct = 0; pscg = 0; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); n1 = (int)strlen(s1[0]); n2 = (int)strlen(s2[0]); for (s = 0; s1[s] != NULL; s++) ; n_seq = s; for (s = 0; s2[s] != NULL; s++) ; if (n_seq != s) vrna_message_error("unequal number of sequences in aliduplexfold()\n"); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { snoupdate_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1)); r = (int **)vrna_alloc(sizeof(int *) * (n1 + 1)); for (i = 0; i <= n1; i++) { c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); r[i] 
= (int *)vrna_alloc(sizeof(int) * (n2 + 1)); for (j = n2; j > -1; j--) { c[i][j] = INF; r[i][j] = INF; } } Sali1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *)); Sali2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *)); for (s = 0; s < n_seq; s++) { if ((int)strlen(s1[s]) != n1) vrna_message_error("uneqal seqence lengths"); if ((int)strlen(s2[s]) != n2) vrna_message_error("uneqal seqence lengths"); Sali1[s] = aliencode_seq(s1[s]); Sali2[s] = aliencode_seq(s2[s]); } type = (int *)vrna_alloc(n_seq * sizeof(int)); type2 = (int *)vrna_alloc(n_seq * sizeof(int)); type3 = (int *)vrna_alloc(n_seq * sizeof(int)); /* encode_seqs(s1, s2); */ for (i = 6; i <= n1 - 5; i++) { int U; U = 0; for (s = 0; s < n_seq; s++) U += Sali1[s][i - 2]; U = (U == (n_seq) * 4 ? 1 : 0); for (j = n2 - min_d2; j > min_d1; j--) { int type4, k, l, psc, psc2, psc3; for (s = 0; s < n_seq; s++) type[s] = pair[Sali1[s][i]][Sali2[s][j]]; psc = covscore(type, n_seq); for (s = 0; s < n_seq; s++) if (type[s] == 0) type[s] = 7; c[i][j] = (psc >= MINPSCORE) ? 
(n_seq * P->DuplexInit) : INF; if (psc < MINPSCORE) continue; if (/* pair[Sali1[i+1]][Sali2[j-1]] && */ U && j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem) { /*constraint on s2 and i*/ folden *temp; temp = foldlist[j + 1]; while (temp->next) { int k = temp->k; for (s = 0; s < n_seq; s++) { type2[s] = pair[Sali1[s][i - 3]][Sali2[s][k + 1]]; type3[s] = pair[Sali1[s][i - 4]][Sali2[s][k + 1]]; } psc2 = covscore(type2, n_seq); psc3 = covscore(type3, n_seq); if (psc2 > MINPSCORE) r[i][j] = MIN2(r[i][j], c[i - 3][k + 1] + temp->energy); if (psc3 > MINPSCORE) r[i][j] = MIN2(r[i][j], c[i - 4][k + 1] + temp->energy); temp = temp->next; } } /* dangle 5'SIDE relative to the mRNA */ for (s = 0; s < n_seq; s++) c[i][j] += vrna_E_ext_stem(type[s], Sali1[s][i - 1], Sali2[s][j + 1], P); for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + j) >= ASS) continue; for (E = s = 0; s < n_seq; s++) { type4 = pair[Sali1[s][k]][Sali2[s][l]]; if (type4 == 0) type4 = 7; E += E_IntLoop(i - k - 1, l - j - 1, type4, rtype[type[s]], Sali1[s][k + 1], Sali2[s][l - 1], Sali1[s][i - 1], Sali2[s][j + 1], P); } c[i][j] = MIN2(c[i][j], c[k][l] + E); r[i][j] = MIN2(r[i][j], r[k][l] + E); } } c[i][j] -= psc; r[i][j] -= psc; E = r[i][j]; for (s = 0; s < n_seq; s++) E += vrna_E_ext_stem(rtype[type[s]], Sali2[s][j - 1], Sali1[s][i + 1], P); /** *** if (i<n1) E += P->dangle3[rtype[type[s]]][Sali1[s][i+1]]; *** if (j>1) E += P->dangle5[rtype[type[s]]][Sali2[s][j-1]]; *** if (type[s]>2) E += P->TerminalAU; **/ if (E < Emin) { Emin = E; i_min = i; j_min = j; } } } if (Emin > 0) { printf("no target found under the constraints chosen\n"); for (i = 0; i <= n1; i++) { free(r[i]); free(c[i]); } free(c); free(r); for (s = 0; s < n_seq; s++) { free(Sali1[s]); free(Sali2[s]); } free(Sali1); free(Sali2); free(S2); free(SS1); free(SS2); free(type); free(type2); free(type3); 
mfe.energy = INF; mfe.structure = NULL; return mfe; } struc = alisnoop_backtrack(i_min, j_min, (const char **)s2, &Duplex_El, &Duplex_Er, &Loop_E, &Loop_D, &u, &pscd, &psct, &pscg, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, (const short **)Sali1, (const short **)Sali2); /* if (i_min<n1-5) i_min++; */ /* if (j_min>6 ) j_min--; */ l1 = strchr(struc, '&') - struc; mfe.i = i_min - 5; mfe.j = j_min - 5; mfe.u = u - 5; mfe.Duplex_Er = (float)Duplex_Er / 100; mfe.Duplex_El = (float)Duplex_El / 100; mfe.Loop_D = (float)Loop_D / 100; mfe.Loop_E = (float)Loop_E / 100; mfe.energy = (float)Emin / 100; /* mfe.fullStemEnergy = (float) fullStemEnergy/100; */ mfe.pscd = pscd; mfe.psct = psct; mfe.structure = struc; for (s = 0; s < n_seq; s++) { free(Sali1[s]); free(Sali2[s]); } free(Sali1); free(Sali2); free(type); free(type2); free(type3); if (!delay_free) { for (i = 0; i <= n1; i++) { free(r[i]); free(c[i]); } free(c); free(r); free(S2); free(SS1); free(SS2); } return mfe; } PUBLIC snoopT * alisnoop_subopt(const char **s1, const char **s2, int delta, int w, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int distance, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2) { short **Sali1, **Sali2; /* printf("%d %d\n", min_s2, max_s2); */ int i, j, s, n_seq, n1, n2, E, n_subopt = 0, n_max; char *struc; snoopT mfe; snoopT *subopt; int thresh; int *type; int Duplex_El, Duplex_Er, Loop_E, pscd, psct, pscg; int Loop_D; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0; Loop_D = 0; pscd = 0; psct = 0; pscg = 0; int u; u = 0; n_max = 16; subopt = (snoopT *)vrna_alloc(n_max * sizeof(snoopT)); delay_free = 1; mfe = alisnoopfold(s1, s2, penalty, threshloop, threshLE, threshRE, threshDE, threshD, 
half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2); if (mfe.energy > 0) { free(subopt); delay_free = 0; return NULL; } thresh = MIN2((int)((mfe.Duplex_Er + mfe.Duplex_El + mfe.Loop_E) * 100 + 0.1 + 410) + delta, threshTE); /* subopt[n_subopt++]=mfe; */ free(mfe.structure); n1 = (int)strlen(s1[0]); n2 = (int)strlen(s2[0]); for (s = 0; s1[s] != NULL; s++) ; n_seq = s; Sali1 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *)); Sali2 = (short **)vrna_alloc((n_seq + 1) * sizeof(short *)); for (s = 0; s < n_seq; s++) { if ((int)strlen(s1[s]) != n1) vrna_message_error("uneqal seqence lengths"); if ((int)strlen(s2[s]) != n2) vrna_message_error("uneqal seqence lengths"); Sali1[s] = aliencode_seq(s1[s]); Sali2[s] = aliencode_seq(s2[s]); } Sali1[n_seq] = NULL; Sali2[n_seq] = NULL; type = (int *)vrna_alloc(n_seq * sizeof(int)); for (i = n1; i > 1; i--) { for (j = 1; j <= n2; j++) { int ii, jj, Ed, psc, skip; for (s = 0; s < n_seq; s++) type[s] = pair[Sali2[s][j]][Sali1[s][i]]; psc = covscore(type, n_seq); for (s = 0; s < n_seq; s++) if (type[s] == 0) type[s] = 7; if (psc < MINPSCORE) continue; E = Ed = r[i][j]; for (s = 0; s < n_seq; s++) { /* if (i<n1-5) Ed += P->dangle3[type[s]][Sali1[s][i+1]]; */ /* if (j>6) Ed += P->dangle5[type[s]][Sali2[s][j-1]]; */ if (type[s] > 2) Ed += P->TerminalAU; } if (Ed > thresh) continue; /* too keep output small, remove hits that are dominated by a * better one close (w) by. For simplicity we do test without * adding dangles, which is slightly inaccurate. 
*/ w = 1; for (skip = 0, ii = MAX2(i - w, 1); (ii <= MIN2(i + w, n1)) && type; ii++) { for (jj = MAX2(j - w, 1); jj <= MIN2(j + w, n2); jj++) if (r[ii][jj] < E) { skip = 1; break; } } if (skip) continue; psct = 0; pscg = 0; struc = alisnoop_backtrack(i, j, s2, &Duplex_El, &Duplex_Er, &Loop_E, &Loop_D, &u, &pscd, &psct, &pscg, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, (const short int **)Sali1, (const int short **)Sali2); if (Duplex_Er > threshRE || Duplex_El > threshLE || Loop_D > threshD || (Duplex_Er + Duplex_El) > threshDE || (Duplex_Er + Duplex_El + Loop_E) > threshTE || (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) > threshSE) { /* printf(" Duplex_Er %d threshRE %d Duplex_El %d threshLE %d \n" */ /* " Duplex_Er + Duplex_El %d threshDE %d \n" */ /* " Duplex_Er + Duplex_El + Loop_E %d threshTE %d \n" */ /* " Duplex_Er + Duplex_El + Loop_E + Loop_D %d threshSE %d \n", */ /* Duplex_Er , threshRE , Duplex_El ,threshLE, */ /* Duplex_Er + Duplex_El, threshDE, */ /* Duplex_Er + Duplex_El+ Loop_E , threshTE, */ /* Duplex_Er + Duplex_El+ Loop_E + Loop_D, threshSE); */ Duplex_Er = 0; Duplex_El = 0; Loop_E = 0; Loop_D = 0; u = 0, free(struc); continue; } if (n_subopt + 1 >= n_max) { n_max *= 2; subopt = (snoopT *)vrna_realloc(subopt, n_max * sizeof(snoopT)); } subopt[n_subopt].i = i - 5; subopt[n_subopt].j = j - 5; subopt[n_subopt].u = u - 5; subopt[n_subopt].Duplex_Er = Duplex_Er * 0.01; subopt[n_subopt].Duplex_El = Duplex_El * 0.01; subopt[n_subopt].Loop_E = Loop_E * 0.01; subopt[n_subopt].Loop_D = Loop_D * 0.01; subopt[n_subopt].energy = (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) * 0.01; subopt[n_subopt].pscd = pscd * 0.01; subopt[n_subopt].psct = -psct * 0.01; subopt[n_subopt++].structure = struc; /* i=u; */ Duplex_Er = 0; Duplex_El = 0; Loop_E = 0; Loop_D = 0; u = 0; pscd = 0; psct = 0; } } for (i = 0; i <= n1; i++) { free(c[i]); free(r[i]); } free(c); free(r); for (s = 
0; s < n_seq; s++) { free(Sali1[s]); free(Sali2[s]); } free(Sali1); free(Sali2); free(type); if (snoop_subopt_sorted) qsort(subopt, n_subopt, sizeof(snoopT), compare); subopt[n_subopt].i = 0; subopt[n_subopt].j = 0; subopt[n_subopt].structure = NULL; return subopt; } PRIVATE char * alisnoop_backtrack(int i, int j, const char **snoseq, int *Duplex_El, int *Duplex_Er, int *Loop_E, int *Loop_D, int *u, int *pscd, int *psct, int *pscg, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const short **Sali1, const short **Sali2) { /* backtrack structure going backwards from i, and forwards from j * return structure in bracket notation with & as separator */ int k, l, *type, *type2, *type3, type4, E, traced, i0, j0, s, n_seq, psc; int traced_r = 0; /* flag for following backtrack in c or r */ char *st1, *st2, *struc; char *struc_loop; n1 = (int)Sali1[0][0]; n2 = (int)Sali2[0][0]; for (s = 0; Sali1[s] != NULL; s++) ; n_seq = s; for (s = 0; Sali2[s] != NULL; s++) ; if (n_seq != s) vrna_message_error("unequal number of sequences in alibacktrack()\n"); st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1)); st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1)); type = (int *)vrna_alloc(n_seq * sizeof(int)); type2 = (int *)vrna_alloc(n_seq * sizeof(int)); type3 = (int *)vrna_alloc(n_seq * sizeof(int)); int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); i0 = i; j0 = j; /* MIN2(i+1,n1); j0=MAX2(j-1,1);!modified */ for (s = 0; s < n_seq; s++) { type[s] = pair[Sali1[s][i]][Sali2[s][j]]; if (type[s] == 0) type[s] = 7; *Duplex_Er += vrna_E_ext_stem(rtype[type[s]], (j > 1) ? Sali2[s][j - 1] : -1, (i < n1) ? 
Sali1[s][i + 1] : -1, P); /** *** if (i<n1) *Duplex_Er += P->dangle3[rtype[type[s]]][Sali1[s][i+1]]; *** if (j>1) *Duplex_Er += P->dangle5[rtype[type[s]]][Sali2[s][j-1]]; *** if (type[s]>2) *Duplex_Er += P->TerminalAU; **/ } while (i > 0 && j <= n2 - min_d2) { if (!traced_r) { E = r[i][j]; traced = 0; st1[i - 1] = '<'; st2[j - 1] = '>'; for (s = 0; s < n_seq; s++) type[s] = pair[Sali1[s][i]][Sali2[s][j]]; psc = covscore(type, n_seq); for (s = 0; s < n_seq; s++) if (type[s] == 0) type[s] = 7; E += psc; *pscd += psc; for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { int LE; if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + j) >= ASS) continue; for (s = LE = 0; s < n_seq; s++) { type4 = pair[Sali1[s][k]][Sali2[s][l]]; if (type4 == 0) type4 = 7; LE += E_IntLoop(i - k - 1, l - j - 1, type4, rtype[type[s]], Sali1[s][k + 1], Sali2[s][l - 1], Sali1[s][i - 1], Sali2[s][j + 1], P); } if (E == r[k][l] + LE) { traced = 1; i = k; j = l; *Duplex_Er += LE; break; } } if (traced) break; } if (!traced) { int U = 0; for (s = 0; s < n_seq; s++) U += Sali1[s][i - 2]; U = (U == (n_seq) * 4 ? 
1 : 0); if (/* pair[Sali1[i+1]][Sali2[j-1]] && */ /* only U's are allowed */ U && j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem) { int min_k, max_k; max_k = MIN2(n2 - min_s2, j + max_half_stem + 1); min_k = MAX2(j + half_stem + 1, n2 - max_s2); folden *temp; temp = foldlist[j + 1]; while (temp->next) { int psc2, psc3; int k = temp->k; for (s = 0; s < n_seq; s++) { type2[s] = pair[Sali1[s][i - 3]][Sali2[s][k + 1]]; type3[s] = pair[Sali1[s][i - 4]][Sali2[s][k + 1]]; } psc2 = covscore(type2, n_seq); psc3 = covscore(type3, n_seq); if (psc2 > MINPSCORE /*&& pair[Sali1[i-4]][Sali2[k+2]]*/) { /* introduce structure from RNAfold */ if (E == c[i - 3][k + 1] + temp->energy) { *Loop_E = temp->energy; st1[i - 3] = '|'; *u = i - 2; int a, b; /* int fix_ij=indx[k-1+1]+j+1; */ for (a = 0; a < MISMATCH; a++) { for (b = 0; b < MISMATCH; b++) { int ij = indx[k - 1 - a + 1] + j + 1 + b; if (cLoop[ij] == temp->energy) { /* int bla; */ struc_loop = alisnobacktrack_fold_from_pair(snoseq, j + 1 + b, k - a - 1 + 1, psct); a = INF; b = INF; } } } traced = 1; traced_r = 1; i = i - 3; j = k + 1; break; } } if (psc3 > MINPSCORE /*&& pair[Sali1[i-5]][Sali2[k+2]]*/) { /* introduce structure from RNAfold */ if (E == c[i - 4][k + 1] + temp->energy) { *Loop_E = temp->energy; st1[i - 3] = '|'; *u = i - 2; int a, b; /* int fix_ij=indx[k-1+1]+j+1; */ for (a = 0; a < MISMATCH; a++) { for (b = 0; b < MISMATCH; b++) { int ij = indx[k - 1 - a + 1] + j + 1 + b; if (cLoop[ij] == temp->energy) { /* int bla; */ struc_loop = alisnobacktrack_fold_from_pair(snoseq, j + 1 + b, k - a - 1 + 1, psct); a = INF; b = INF; } } } traced = 1; traced_r = 1; i = i - 4; j = k + 1; break; } } /* else if */ temp = temp->next; } /* while temp-> next */ } /* test on j */ } /* traced? */ } /* traced_r? 
*/ else { E = c[i][j]; traced = 0; st1[i - 1] = '<'; st2[j - 1] = '>'; for (s = 0; s < n_seq; s++) type[s] = pair[Sali1[s][i]][Sali2[s][j]]; psc = covscore(type, n_seq); for (s = 0; s < n_seq; s++) if (type[s] == 0) type[s] = 7; E += psc; *pscd += psc; if (!type) vrna_message_error("backtrack failed in fold duplex c"); for (k = i - 1; (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { int LE; if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + j) >= ASS) continue; for (s = LE = 0; s < n_seq; s++) { type4 = pair[Sali1[s][k]][Sali2[s][l]]; if (type4 == 0) type4 = 7; LE += E_IntLoop(i - k - 1, l - j - 1, type4, rtype[type[s]], Sali1[s][k + 1], Sali2[s][l - 1], Sali1[s][i - 1], Sali2[s][j + 1], P); } if (E == c[k][l] + LE) { traced = 1; i = k; j = l; *Duplex_El += LE; break; } } if (traced) break; } } if (!traced) { for (s = 0; s < n_seq; s++) { int correction; correction = vrna_E_ext_stem(type[s], (i > 1) ? Sali1[s][i - 1] : -1, (j < n2) ? Sali2[s][j + 1] : -1, P); *Duplex_El += correction; E -= correction; /** *** if (i>1) {E -= P->dangle5[type[s]][Sali1[s][i-1]]; *Duplex_El +=P->dangle5[type[s]][Sali1[s][i-1]];} *** if (j<n2) {E -= P->dangle3[type[s]][Sali2[s][j+1]]; *Duplex_El +=P->dangle3[type[s]][Sali2[s][j+1]];} *** if (type[s]>2) {E -= P->TerminalAU; *Duplex_El +=P->TerminalAU;} **/ } if (E != n_seq * P->DuplexInit) vrna_message_error("backtrack failed in fold duplex end"); else break; } } /* if (i>1) i--; */ /* if (j<n2) j++; */ /* struc = (char *) vrna_alloc(i0-i+1+j-j0+1+2); */ /* declare final duplex structure */ struc = (char *)vrna_alloc(i0 - i + 1 + n2 - 1 + 1 + 2); /* declare final duplex structure */ char *struc2; struc2 = (char *)vrna_alloc(n2 + 1); /* char * struct_const; */ for (k = MAX2(i, 1); k <= i0; k++) if (!st1[k - 1]) st1[k - 1] = '.'; /* for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = struc_loop[k-1];*/ /* '.'; normal */ /* char * struct_const; */ /* struct_const = (char *) vrna_alloc(sizeof(char)*(n2+1)); */ for (k = 
1; k <= n2; k++) { if (!st2[k - 1]) st2[k - 1] = struc_loop[k - 1]; /* '.'; */ struc2[k - 1] = st2[k - 1]; /* '.'; */ /* if (k>=j0 && k<=j){ */ /* struct_const[k-1]='x'; */ /* } */ /* else{ */ /* if(k<j0) {struct_const[k-1]='<';} */ /* if(k>j) {struct_const[k-1]='>';} */ /* } */ } /* char duplexseq_1[j0+1]; */ /* char duplexseq_2[n2-j+3]; */ if (j < n2) { char **duplexseq_1, **duplexseq_2; duplexseq_1 = (char **)vrna_alloc((n_seq + 1) * sizeof(char *)); duplexseq_2 = (char **)vrna_alloc((n_seq + 1) * sizeof(char *)); for (s = 0; s < n_seq; s++) { duplexseq_1[s] = (char *)vrna_alloc((j0) * sizeof(char)); /* modfied j0+1 */ duplexseq_2[s] = (char *)vrna_alloc((n2 - j + 2) * sizeof(char)); /* modified j+3 */ strncpy(duplexseq_1[s], snoseq[s], j0 - 1); /* modified j0 */ strcpy(duplexseq_2[s], snoseq[s] + j); /* modified j-1 */ duplexseq_1[s][j0 - 1] = '\0'; /* modified j0 */ duplexseq_2[s][n2 - j + 1] = '\0'; /* modified j+2 */ } duplexseq_1[n_seq] = NULL; duplexseq_2[n_seq] = NULL; duplexT temp; temp = aliduplexfold((const char **)duplexseq_1, (const char **)duplexseq_2); *Loop_D = MIN2(0, -410 + (int)100 * temp.energy * n_seq); if (*Loop_D) { int l1, ibegin, iend, jbegin, jend; l1 = strchr(temp.structure, '&') - temp.structure; ibegin = temp.i - l1; iend = temp.i - 1; jbegin = temp.j; jend = temp.j + (int)strlen(temp.structure) - l1 - 2 - 1; for (k = ibegin + 1; k <= iend + 1; k++) struc2[k - 1] = temp.structure[k - ibegin - 1]; for (k = jbegin + j; k <= jend + j; k++) struc2[k - 1] = temp.structure[l1 + k - j - jbegin + 1]; } for (s = 0; s < n_seq; s++) { free(duplexseq_1[s]); free(duplexseq_2[s]); } free(duplexseq_1); free(duplexseq_2); free(temp.structure); } strcpy(struc, st1 + MAX2(i - 1, 0)); strcat(struc, "&"); /* strcat(struc, st2); */ strncat(struc, struc2 + 5, (int)strlen(struc2) - 10); free(struc2); free(struc_loop); free(st1); free(st2); free(type); free(type2); free(type3); /* free_arrays(); */ return struc; } void Lsnoop_subopt(const char *s1, const 
char *s2, int delta, int w, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int distance, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const int alignment_length, const char *name, const int fullStemEnergy) { int min_colonne = INF; int max_pos; int max; max = INF; /* int temp; */ /* int nsubopt=10; */ n1 = (int)strlen(s1); n2 = (int)strlen(s2); int *position; position = (int *)vrna_alloc((n1 + 3) * sizeof(int)); /* int Eminj, Emin_l; */ int i, j; /* l1, Emin=INF, i_min=0, j_min=0; */ /* char *struc; */ /* snoopT mfe; */ int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; int Duplex_El, Duplex_Er; int Loop_D; /* int u; */ int Loop_E; vrna_md_t md; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0, Loop_D = 0; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { snoupdate_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } lc = (int **)vrna_alloc(sizeof(int *) * (5)); lr = (int **)vrna_alloc(sizeof(int *) * (5)); for (i = 0; i < 5; i++) { lc[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); lr[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); for (j = n2; j > -1; j--) { lc[i][j] = INF; lr[i][j] = INF; } } encode_seqs(s1, s2); for (i = 1; i <= n1; i++) { int idx = i % 5; int idx_1 = (i - 1) % 5; int idx_2 = (i - 2) % 5; int idx_3 = (i - 3) % 5; int idx_4 = (i - 4) % 5; for (j = n2 - min_d2; j > min_d1; j--) { int type, type2, k; type = pair[S1[i]][S2[j]]; lc[idx][j] = (type) ? 
P->DuplexInit + 2 * penalty : INF; lr[idx][j] = INF; if (!type) continue; if ( /*pair[S1[i+1]][S2[j-1]] && check that we have a solid base stack after the mLoop */ j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem && S1[i - 2] == 4) { /*constraint on s2 and i*/ int min_k, max_k; max_k = MIN2(n2 - min_s2, j + max_half_stem + 1); min_k = MAX2(j + half_stem + 1, n2 - max_s2); for (k = min_k; k <= max_k; k++) { if (mLoop[indx[k - 1] + j + 1] < 0) { } if (pair[S1[i - 3]][S2[k]] /*genau zwei ungepaarte nucleotiden --NU--*/ && mLoop[indx[k - 1] + j + 1] < threshloop) lr[idx][j] = MIN2(lr[idx][j], lc[idx_3][k] + mLoop[indx[k - 1] + j + 1]); else if (pair[S1[i - 4]][S2[k]] && mLoop[indx[k - 1] + j + 1] < threshloop) /*--NUN--*/ lr[idx][j] = MIN2(lr[idx][j], lc[idx_4][k] + mLoop[indx[k - 1] + j + 1]); } } /* dangle 5'SIDE relative to the mRNA */ lc[idx][j] += vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? SS2[j + 1] : -1, P); /** *** if (i>1) lc[idx][j] += P->dangle5[type][SS1[i-1]]; *** if (j<n2) lc[idx][j] += P->dangle3[type][SS2[j+1]]; *** if (type>2) lc[idx][j] += P->TerminalAU; **/ if (j < n2 && i > 1) { type2 = pair[S1[i - 1]][S2[j + 1]]; if (type2 > 0) { lc[idx][j] = MIN2(lc[idx_1][j + 1] + E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1], P) + 2 * penalty, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_1][j + 1] + E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1], P) + 2 * penalty, lr[idx][j]); } } if (j < n2 - 1 && i > 2) { type2 = pair[S1[i - 2]][S2[j + 2]]; if (type2 > 0) { lc[idx][j] = MIN2(lc[idx_2][j + 2] + E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1], P) + 4 * penalty, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_2][j + 2] + E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1], P) + 4 * penalty, lr[idx][j]); } } if (j < n2 - 2 && i > 3) { type2 = pair[S1[i - 3]][S2[j + 3]]; if (type2 > 0) { lc[idx][j] = 
MIN2(lc[idx_3][j + 3] + E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1], P) + 6 * penalty, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_3][j + 3] + E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1], P) + 6 * penalty, lr[idx][j]); } } /** *** (type>2?P->TerminalAU:0)+(i<(n1)?P->dangle3[rtype[type]][SS1[i+1]]+penalty:0)+(j>1?P->dangle5[rtype[type]][SS2[j-1]]+penalty:0) **/ min_colonne = MIN2(lr[idx][j] + vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P), min_colonne); } position[i] = min_colonne; if (max >= min_colonne) { max = min_colonne; max_pos = i; } min_colonne = INF; } free(S1); free(S2); free(SS1); free(SS2); if (max < threshTE) { find_max_snoop(s1, s2, max, alignment_length, position, delta, distance, penalty, threshloop, threshLE, threshRE, threshDE, threshTE, threshSE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, name, fullStemEnergy); } for (i = 1; i < 5; i++) { free(lc[i]); free(lr[i]); } free(lc[0]); free(lr[0]); free(lc); free(lr); free(position); } void Lsnoop_subopt_list(const char *s1, const char *s2, int delta, int w, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int distance, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const int alignment_length, const char *name, const int fullStemEnergy) { int min_colonne = INF; int max_pos; int max; max = INF; /* int temp; */ /* int nsubopt=10; */ n1 = (int)strlen(s1); n2 = (int)strlen(s2); int *position; position = (int *)vrna_alloc((n1 + 3) * sizeof(int)); /* int Eminj, Emin_l; */ int i, j;/* l1, Emin=INF, i_min=0, j_min=0; */ /* char *struc; */ /* snoopT mfe; */ int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; int Duplex_El, 
Duplex_Er; int Loop_D; /* int u; */ int Loop_E; vrna_md_t md; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0, Loop_D = 0; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { snoupdate_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } lpair = (int **)vrna_alloc(sizeof(int *) * (6)); lc = (int **)vrna_alloc(sizeof(int *) * (6)); lr = (int **)vrna_alloc(sizeof(int *) * (6)); for (i = 0; i < 6; i++) { lc[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); lr[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); lpair[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); for (j = n2; j > -1; j--) { lc[i][j] = INF; lr[i][j] = INF; lpair[i][j] = 0; } } encode_seqs(s1, s2); int lim_maxj = n2 - min_d2; int lim_minj = min_d1; int lim_maxi = n1; for (i = 5; i <= lim_maxi; i++) { int idx = i % 5; int idx_1 = (i - 1) % 5; int idx_2 = (i - 2) % 5; int idx_3 = (i - 3) % 5; int idx_4 = (i - 4) % 5; for (j = lim_maxj; j > lim_minj; j--) { int type, type2;/* E, k,l; */ type = pair[S1[i]][S2[j]]; lpair[idx][j] = type; lc[idx][j] = (type) ? 
P->DuplexInit + 2 * penalty : INF;
      lr[idx][j] = INF;
      if (!type)
        continue;

      /* candidate stem end: j must lie in the allowed window of s2 and the
       * mRNA position i-2 must be a U (encoded as 4) */
      if ( /*pair[S1[i+1]][S2[j-1]] && Be sure it binds*/
        j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem &&
        j < n2 - min_s2 - half_stem && S1[i - 2] == 4) {
        /*constraint on s2 and i*/
        int min_k, max_k;
        max_k = MIN2(n2 - min_s2, j + max_half_stem + 1);
        min_k = MAX2(j + half_stem + 1, n2 - max_s2);
        folden *temp;
        temp = foldlist[j + 1];
        /* walk the precomputed fold list and try to close the loop with a
         * --NU-- (idx_3) or --NUN-- (idx_4) spacer */
        while (temp->next) {
          int k = temp->k;
          /* if(k >= min_k-1 && k < max_k){ comment to recover normal behaviour */
          if (lpair[idx_3][k + 1] /*&& lpair[idx_4][k+2]*/)
            lr[idx][j] = MIN2(lr[idx][j], lc[idx_3][k + 1] + temp->energy); /*--NU--*/
          /*else*/
          if (lpair[idx_4][k + 1]) /*--NUN--*/
            lr[idx][j] = MIN2(lr[idx][j], lc[idx_4][k + 1] + temp->energy);
          /* } */
          temp = temp->next;
        }
      }

      /* dangle 5'SIDE relative to the mRNA */
      lc[idx][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P);
      /**
      *** lc[idx][j] += P->dangle5[type][SS1[i-1]];
      *** lc[idx][j] += P->dangle3[type][SS2[j+1]];
      *** if (type>2) lc[idx][j] += P->TerminalAU;
      **/
      /* extend by a stacked pair (0x0 interior loop) */
      /* if(j<n2 && i>1){ */
      /* type2=pair[S1[i-1]][S2[j+1]]; */
      type2 = lpair[idx_1][j + 1];
      if (type2 > 0) {
        lc[idx][j] = MIN2(lc[idx_1][j + 1] +
                          E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j],
                                    SS1[i - 1], SS2[j + 1], P) + 2 * penalty,
                          lc[idx][j]);
        lr[idx][j] = MIN2(lr[idx_1][j + 1] +
                          E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j],
                                    SS1[i - 1], SS2[j + 1], P) + 2 * penalty,
                          lr[idx][j]);
      }
      /* } */
      /* extend across a 1x1 interior loop */
      /* if(j<n2-1 && i>2){ */
      /* type2=pair[S1[i-2]][S2[j+2]]; */
      type2 = lpair[idx_2][j + 2];
      if (type2 > 0) {
        /* NOTE(review): unlike the 0x0 (+2*penalty) and 2x2 (+6*penalty)
         * cases below/above — and unlike the same 1x1 case in
         * Lsnoop_subopt(), which adds 4*penalty — no penalty term is added
         * here.  Confirm this asymmetry is intentional. */
        lc[idx][j] = MIN2(lc[idx_2][j + 2] +
                          E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1],
                                    SS1[i - 1], SS2[j + 1], P),
                          lc[idx][j]);
        lr[idx][j] = MIN2(lr[idx_2][j + 2] +
                          E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1],
                                    SS1[i - 1], SS2[j + 1], P),
                          lr[idx][j]);
        /* } */
      }
      /* extend across a 2x2 interior loop */
      /* if(j<n2-2 && i>3){ */
      /* type2 = pair[S1[i-3]][S2[j+3]]; */
      type2 = lpair[idx_3][j + 3];
      if (type2 > 0) {
        lc[idx][j] = MIN2(lc[idx_3][j + 3] + E_IntLoop(2,
2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1], P) + 6 * penalty, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_3][j + 3] + E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1], P) + 6 * penalty, lr[idx][j]); /* } */ } /* min_colonne=MIN2(lr[idx][j]+(type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]], min_colonne); */ int bla; bla = lr[idx][j] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P) + 2 * penalty; min_colonne = MIN2(bla, min_colonne); } position[i] = min_colonne; if (max >= min_colonne) { max = min_colonne; max_pos = i; } min_colonne = INF; } free(S1); free(S2); free(SS1); free(SS2); if (max < threshTE) { find_max_snoop(s1, s2, max, alignment_length, position, delta, distance, penalty, threshloop, threshLE, threshRE, threshDE, threshTE, threshSE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, name, fullStemEnergy); } for (i = 1; i < 6; i++) { free(lc[i]); free(lr[i]); free(lpair[i]); } free(lc[0]); free(lr[0]); free(lpair[0]); free(lc); free(lr); free(lpair); free(position); } PRIVATE void find_max_snoop(const char *s1, const char *s2, const int max, const int alignment_length, const int *position, const int delta, const int distance, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const char *name, const int fullStemEnergy) { int count = 0; int pos = n1 + 1; int threshold = MIN2(threshTE, max + delta); /* printf("threshTE %d max %d\n", threshTE, max); */ /* #pragma omp parallel for */ /* for(pos=n1+1;pos>distance;pos--){ */ while (pos-- > 5) { int temp_min = 0; if (position[pos] < (threshold)) { int search_range; search_range = distance + 1; while (--search_range) 
if (position[pos - search_range] <= position[pos - temp_min]) temp_min = search_range; pos -= temp_min; int begin = MAX2(6, pos - alignment_length + 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (pos - begin + 3 + 12)); strcpy(s3, "NNNNN"); strncat(s3, (s1 + begin - 1), pos - begin + 2); strcat(s3, "NNNNN\0"); /* printf("%s s3\n", s3); */ snoopT test; test = snoopfold(s3, s2, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, fullStemEnergy); if (test.energy == INF) { free(s3); continue; } if (test.Duplex_El > threshLE * 0.01 || test.Duplex_Er > threshRE * 0.01 || test.Loop_D > threshD * 0.01 || (test.Duplex_Er + test.Duplex_El) > threshDE * 0.01 || (test.Duplex_Er + test.Duplex_El + test.Loop_E + test.Loop_D + 410) > threshSE * 0.01) { free(test.structure); free(s3); continue; } int l1; l1 = strchr(test.structure, '&') - test.structure; int shift = 0; if (test.i > (int)strlen(s3) - 10) { test.i--; l1--; } if (test.i - l1 < 0) { l1--; shift++; } char *target_struct = (char *)vrna_alloc(sizeof(char) * (strlen(test.structure) + 1)); strncpy(target_struct, test.structure + shift, l1); strncat(target_struct, test.structure + (strchr(test.structure, '&') - test.structure), (int)strlen(test.structure) - (strchr(test.structure, '&') - test. 
structure)); strcat(target_struct, "\0"); char *target; target = (char *)vrna_alloc(l1 + 1); strncpy(target, (s3 + test.i + 5 - l1), l1); target[l1] = '\0'; char *s4; s4 = (char *)vrna_alloc(sizeof(char) * (strlen(s2) - 9)); strncpy(s4, s2 + 5, (int)strlen(s2) - 10); s4[(int)strlen(s2) - 10] = '\0'; printf( "%s %3d,%-3d;%3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f + %5.2f + 4.1 ) (%5.2f) \n%s&%s\n", target_struct, begin + test.i - 5 - l1, begin + test.i - 6, begin + test.u - 6, test.j + 1, test.j + (int)(strrchr(test.structure, '>') - strchr(test.structure, '>')) + 1, test.Loop_D + test.Duplex_El + test.Duplex_Er + test.Loop_E + 4.10, test.Duplex_El, test.Duplex_Er, test.Loop_E, test.Loop_D, test.fullStemEnergy, target, s4); if (name) { char *temp_seq; char *temp_struc; char *psoutput; temp_seq = (char *)vrna_alloc(sizeof(char) * (l1 + n2 - 9)); temp_struc = (char *)vrna_alloc(sizeof(char) * (l1 + n2 - 9)); strcpy(temp_seq, target); strcat(temp_seq, s4); strncpy(temp_struc, target_struct, l1); strcat(temp_struc, target_struct + l1 + 1); temp_seq[n2 + l1 - 10] = '\0'; temp_struc[n2 + l1 - 10] = '\0'; cut_point = l1 + 1; psoutput = vrna_strdup_printf("sno_%d_u_%d_%s.ps", count, begin + test.u - 6, name); PS_rna_plot_snoop_a(temp_seq, temp_struc, psoutput, NULL, NULL); cut_point = -1; free(temp_seq); free(temp_struc); free(psoutput); count++; /* free(psoutput); */ } free(s4); free(test.structure); free(target_struct); free(target); free(s3); } } } snoopT snoopfold(const char *s1, const char *s2, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const int fullStemEnergy) { /* int Eminj, Emin_l; */ int i, j, l1, Emin = INF, i_min = 0, j_min = 0; char *struc; snoopT mfe; int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; int 
Duplex_El, Duplex_Er; int Loop_D; int u; int Loop_E; vrna_md_t md; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0, Loop_D = 0; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); n1 = (int)strlen(s1); n2 = (int)strlen(s2); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { snoupdate_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1)); r = (int **)vrna_alloc(sizeof(int *) * (n1 + 1)); for (i = 0; i <= n1; i++) { c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); r[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); for (j = n2; j > -1; j--) { c[i][j] = INF; r[i][j] = INF; } } encode_seqs(s1, s2); for (i = 6; i <= n1 - 5; i++) { for (j = n2 - min_d2; j > min_d1; j--) { int type, type2, E, k, l; type = pair[S1[i]][S2[j]]; c[i][j] = (type) ? P->DuplexInit : INF; if (!type) continue; if (/* pair[S1[i+1]][S2[j-1]] && */ j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem && S1[i - 2] == 4) { /*constraint on s2 and i*/ int min_k, max_k; max_k = MIN2(n2 - min_s2, j + max_half_stem); min_k = MAX2(j + half_stem, n2 - max_s2); folden *temp; temp = foldlist[j + 1]; while (temp->next) { int k = temp->k; /* if(k >= min_k-1 && k < max_k){ uncomment to recovernormal behaviour */ if (pair[S1[i - 3]][S2[k + 1]] /*&& pair[S1[i-4]][S2[k+2]]*/) r[i][j] = MIN2(r[i][j], c[i - 3][k + 1] + temp->energy); /*else*/ if (pair[S1[i - 4]][S2[k + 1]] /*&& pair[S1[i-5]][S2[k+2]]*/) r[i][j] = MIN2(r[i][j], c[i - 4][k + 1] + temp->energy); /* } */ temp = temp->next; } } /* dangle 5'SIDE relative to the mRNA */ /** *** c[i][j] += P->dangle5[type][SS1[i-1]]; *** c[i][j] += P->dangle3[type][SS2[j+1]]; *** if (type>2) c[i][j] += P->TerminalAU; **/ c[i][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P); for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + 
j) >= ASS) continue; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); c[i][j] = MIN2(c[i][j], c[k][l] + E + (i - k + l - j) * penalty); r[i][j] = MIN2(r[i][j], r[k][l] + E + (i - k + l - j) * penalty); } } E = r[i][j]; /** *** if (i<n1) E += P->dangle3[rtype[type]][SS1[i+1]]; *** if (j>1) E += P->dangle5[rtype[type]][SS2[j-1]]; *** f (type>2) E += P->TerminalAU; **/ E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P); if (E < Emin) { Emin = E; i_min = i; j_min = j; } } } if (Emin > 0) { printf("no target found under the constraints chosen\n"); for (i = 0; i <= n1; i++) { free(r[i]); free(c[i]); } free(c); free(r); free(S1); free(S2); free(SS1); free(SS2); mfe.energy = INF; return mfe; } struc = snoop_backtrack(i_min, j_min, s2, &Duplex_El, &Duplex_Er, &Loop_E, &Loop_D, &u, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2); /* if (i_min<n1-5) i_min++; */ /* if (j_min>1 ) j_min--; */ l1 = strchr(struc, '&') - struc; mfe.i = i_min - 5; mfe.j = j_min - 5; mfe.u = u - 5; mfe.Duplex_Er = (float)Duplex_Er / 100; mfe.Duplex_El = (float)Duplex_El / 100; mfe.Loop_D = (float)Loop_D / 100; mfe.Loop_E = (float)Loop_E / 100; mfe.energy = (float)Emin / 100; mfe.fullStemEnergy = (float)fullStemEnergy / 100; mfe.structure = struc; if (!delay_free) { for (i = 0; i <= n1; i++) { free(r[i]); free(c[i]); } free(c); free(r); free(S1); free(S2); free(SS1); free(SS2); } return mfe; } PRIVATE int snoopfold_XS_fill(const char *s1, const char *s2, const int **access_s1, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2) { /* int Eminj, Emin_l; 
*/ int i, j, Emin = INF, i_min = 0, j_min = 0; /* char *struc; */ /* snoopT mfe; */ int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; int Duplex_El, Duplex_Er; int Loop_D; /* int u; */ int Loop_E; vrna_md_t md; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0, Loop_D = 0; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); n1 = (int)strlen(s1); n2 = (int)strlen(s2); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { snoupdate_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } c_fill = (int **)vrna_alloc(sizeof(int *) * (n1 + 1)); r_fill = (int **)vrna_alloc(sizeof(int *) * (n1 + 1)); for (i = 0; i <= n1; i++) { c_fill[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); r_fill[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); for (j = n2; j > -1; j--) { c_fill[i][j] = INF; r_fill[i][j] = INF; } } encode_seqs(s1, s2); int di[5]; di[0] = 0; for (i = 6; i <= n1 - 5; i++) { di[1] = access_s1[5][i] - access_s1[4][i - 1]; di[2] = access_s1[5][i - 1] - access_s1[4][i - 2] + di[1]; di[3] = access_s1[5][i - 2] - access_s1[4][i - 3] + di[2]; di[4] = access_s1[5][i - 3] - access_s1[4][i - 4] + di[3]; di[1] = MIN2(di[1], 165); di[2] = MIN2(di[2], 330); di[3] = MIN2(di[3], 495); di[4] = MIN2(di[4], 660); for (j = n2 - min_d2; j > min_d1; j--) { int type, type2, E, k, l; type = pair[S1[i]][S2[j]]; c_fill[i][j] = (type) ? 
P->DuplexInit : INF; if (!type) continue; if (/* pair[S1[i+1]][S2[j-1]] && */ j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem && S1[i - 2] == 4) { /*constraint on s2 and i*/ int min_k, max_k; max_k = MIN2(n2 - min_s2, j + max_half_stem); min_k = MAX2(j + half_stem, n2 - max_s2); folden *temp; temp = foldlist[j + 1]; while (temp->next) { int k = temp->k; /* if(k >= min_k-1 && k < max_k){ uncomment to recovernormal behaviour */ if (pair[S1[i - 3]][S2[k + 1]] /*&& pair[S1[i-4]][S2[k+2]]*/) r_fill[i][j] = MIN2(r_fill[i][j], c_fill[i - 3][k + 1] + temp->energy + di[3]); /*else*/ if (pair[S1[i - 4]][S2[k + 1]] /*&& pair[S1[i-5]][S2[k+2]]*/) r_fill[i][j] = MIN2(r_fill[i][j], c_fill[i - 4][k + 1] + temp->energy + di[4]); /* } */ temp = temp->next; } } /* dangle 5'SIDE relative to the mRNA */ /** *** c_fill[i][j] += P->dangle5[type][SS1[i-1]]; *** c_fill[i][j] += P->dangle3[type][SS2[j+1]]; *** if (type>2) c_fill[i][j] += P->TerminalAU; **/ c_fill[i][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P); for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + j) >= ASS) continue; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; E = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); c_fill[i][j] = MIN2(c_fill[i][j], c_fill[k][l] + E + di[i - k]); r_fill[i][j] = MIN2(r_fill[i][j], r_fill[k][l] + E + di[i - k]); } } E = r_fill[i][j]; /** *** if (i<n1) E += P->dangle3[rtype[type]][SS1[i+1]]; *** if (j>1) E += P->dangle5[rtype[type]][SS2[j-1]]; *** if (type>2) E += P->TerminalAU; **/ E += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? 
SS1[i + 1] : -1, P); if (E < Emin) { Emin = E; i_min = i; j_min = j; } } } return Emin; } PUBLIC snoopT * snoop_subopt(const char *s1, const char *s2, int delta, int w, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int distance, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const int fullStemEnergy) { /* printf("%d %d\n", min_s2, max_s2); */ int i, j, n1, n2, E, n_subopt = 0, n_max; char *struc; snoopT mfe; snoopT *subopt; int thresh; int Duplex_El, Duplex_Er, Loop_E; int Loop_D; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0; Loop_D = 0; int u; u = 0; n_max = 16; subopt = (snoopT *)vrna_alloc(n_max * sizeof(snoopT)); delay_free = 1; mfe = snoopfold(s1, s2, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, fullStemEnergy); if (mfe.energy > 0) { free(subopt); delay_free = 0; return NULL; } thresh = MIN2((int)((mfe.Duplex_Er + mfe.Duplex_El + mfe.Loop_E) * 100 + 0.1 + 410) + delta, threshTE); /* subopt[n_subopt++]=mfe; */ free(mfe.structure); n1 = (int)strlen(s1); n2 = (int)strlen(s2); for (i = n1; i > 0; i--) { for (j = 1; j <= n2; j++) { int type, Ed; type = pair[S2[j]][S1[i]]; if (!type) continue; E = Ed = r[i][j]; /** *** if (i<n1) Ed += P->dangle3[type][SS1[i+1]]; *** if (j>1) Ed += P->dangle5[type][SS2[j-1]]; *** if (type>2) Ed += P->TerminalAU; **/ Ed += vrna_E_ext_stem(type, (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P); if (Ed > thresh) continue; /* too keep output small, remove hits that are dominated by a * better one close (w) by. For simplicity we do test without * adding dangles, which is slightly inaccurate. 
*/ /* w=1; */ /* for (ii=MAX2(i-w,1); (ii<=MIN2(i+w,n1)) && type; ii++) { */ /* for (jj=MAX2(j-w,1); jj<=MIN2(j+w,n2); jj++) */ /* if (r[ii][jj]<E) {type=0; break;} */ /* } */ if (!type) continue; struc = snoop_backtrack(i, j, s2, &Duplex_El, &Duplex_Er, &Loop_E, &Loop_D, &u, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2); if (Duplex_Er > threshRE || Duplex_El > threshLE || Loop_D > threshD || (Duplex_Er + Duplex_El) > threshDE || (Duplex_Er + Duplex_El + Loop_E) > threshTE || (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) > threshSE) { /* printf(" Duplex_Er %d threshRE %d Duplex_El %d threshLE %d \n" */ /* " Duplex_Er + Duplex_El %d threshDE %d \n" */ /* " Duplex_Er + Duplex_El + Loop_E %d threshTE %d \n" */ /* " Duplex_Er + Duplex_El + Loop_E + Loop_D %d threshSE %d \n", */ /* Duplex_Er , threshRE , Duplex_El ,threshLE, */ /* Duplex_Er + Duplex_El, threshDE, */ /* Duplex_Er + Duplex_El+ Loop_E , threshTE, */ /* Duplex_Er + Duplex_El+ Loop_E + Loop_D, threshSE); */ Duplex_Er = 0; Duplex_El = 0; Loop_E = 0; Loop_D = 0; u = 0, free(struc); continue; } if (n_subopt + 1 >= n_max) { n_max *= 2; subopt = (snoopT *)vrna_realloc(subopt, n_max * sizeof(snoopT)); } subopt[n_subopt].i = i - 5; subopt[n_subopt].j = j - 5; subopt[n_subopt].u = u - 5; subopt[n_subopt].Duplex_Er = Duplex_Er * 0.01; subopt[n_subopt].Duplex_El = Duplex_El * 0.01; subopt[n_subopt].Loop_E = Loop_E * 0.01; subopt[n_subopt].Loop_D = Loop_D * 0.01; subopt[n_subopt].energy = (Duplex_Er + Duplex_El + Loop_E + Loop_D + 410) * 0.01; subopt[n_subopt].fullStemEnergy = (float)fullStemEnergy * 0.01; subopt[n_subopt++].structure = struc; Duplex_Er = 0; Duplex_El = 0; Loop_E = 0; Loop_D = 0; u = 0; } } for (i = 0; i <= n1; i++) { free(c[i]); free(r[i]); } free(c); free(r); free(S1); free(S2); free(SS1); free(SS2); delay_free = 0; if (snoop_subopt_sorted) qsort(subopt, n_subopt, sizeof(snoopT), compare); subopt[n_subopt].i = 
0; subopt[n_subopt].j = 0; subopt[n_subopt].structure = NULL; return subopt; } PUBLIC void snoop_subopt_XS(const char *s1, const char *s2, const int **access_s1, int delta, int w, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int distance, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const int alignment_length, const char *name, const int fullStemEnergy) { /* printf("%d %d\n", min_s2, max_s2); */ int i, j, E, n_max; /* char *struc; */ /* snoopT mfe; */ int thresh; int Duplex_El, Duplex_Er, Loop_E; int Loop_D; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0; Loop_D = 0; int u; u = 0; n_max = 16; delay_free = 1; int Emin = snoopfold_XS_fill(s1, s2, access_s1, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2); if (Emin > 0) delay_free = 0; thresh = MIN2(-100, threshTE + alignment_length * 30); /* n1=(int)strlen(s1); */ /* n2=(int)strlen(s2); */ int n3 = (int)strlen(s1); int n4 = (int)strlen(s2); S1_fill = (short *)vrna_alloc(sizeof(short) * (n3 + 2)); S2_fill = (short *)vrna_alloc(sizeof(short) * (n4 + 2)); SS1_fill = (short *)vrna_alloc(sizeof(short) * (n3 + 1)); SS2_fill = (short *)vrna_alloc(sizeof(short) * (n4 + 1)); memcpy(S1_fill, S1, sizeof(short) * n3 + 2); memcpy(S2_fill, S2, sizeof(short) * n4 + 2); memcpy(SS1_fill, SS1, sizeof(short) * n3 + 1); memcpy(SS2_fill, SS2, sizeof(short) * n4 + 1); free(S1); free(S2); free(SS1); free(SS2); int count = 0; for (i = n3 - 5; i > 0; i--) { for (j = 1; j <= n4; j++) { int type, Ed; type = pair[S2_fill[j]][S1_fill[i]]; if (!type) continue; E = Ed = r_fill[i][j]; /** ***if (i<n3) Ed += P->dangle3[type][SS1_fill[i+1]]; ***if (j>1) Ed += P->dangle5[type][SS2_fill[j-1]]; ***if (type>2) Ed += P->TerminalAU; **/ Ed += 
vrna_E_ext_stem(type, (j > 1) ? SS2[j - 1] : -1, (i < n3) ? SS1[i + 1] : -1, P); if (Ed > thresh) continue; /* to keep output small, remove hits that are dominated by a * better one close (w) by. For simplicity we do test without * adding dangles, which is slightly inaccurate. */ /* w=10; */ /* for (ii=MAX2(i-w,1); (ii<=MIN2(i+w,n3-5)) && type; ii++) { */ /* for (jj=MAX2(j-w,1); jj<=MIN2(j+w,n4-5); jj++) */ /* if (r_fill[ii][jj]<E) {type=0; break;} */ /* } */ /* i=ii;j=jj; */ if (!type) continue; int begin = MAX2(5, i - alignment_length); int end = MIN2(n3 - 5, i - 1); char *s3 = (char *)vrna_alloc(sizeof(char) * (end - begin + 2) + 5); strncpy(s3, (s1 + begin), end - begin + 1); strcat(s3, "NNNNN\0"); int n5 = (int)strlen(s3); snoopT test = snoopfold_XS(s3, s2, access_s1, i, j, penalty, threshloop, threshLE, threshRE, threshDE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, fullStemEnergy); if (test.energy == INF) { free(s3); continue; } if (test.Duplex_El > threshLE * 0.01 || test.Duplex_Er > threshRE * 0.01 || test.Loop_D > threshD * 0.01 || (test.Duplex_Er + test.Duplex_El) > threshDE * 0.01 || (test.Duplex_Er + test.Duplex_El + test.Loop_E) > threshTE * 0.01 || (test.Duplex_Er + test.Duplex_El + test.Loop_E + test.Loop_D + 410) > threshSE * 0.01) { free(test.structure); free(s3); continue; } char *s4; s4 = (char *)vrna_alloc(sizeof(char) * (n4 - 9)); strncpy(s4, s2 + 5, n4 - 10); s4[n4 - 10] = '\0'; char *s5 = vrna_alloc(sizeof(char) * n5 - test.i + 2 - 5); strncpy(s5, s3 + test.i - 1, n5 - test.i + 1 - 5); s5[n5 - test.i + 1 - 5] = '\0'; float dE = ((float)(access_s1[n5 - test.i + 1 - 5][i])) * 0.01; printf( "%s %3d,%-3d;%3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f + %5.2f + %5.2f + 4.10) (%5.2f)\n%s&%s\n", test.structure, i - (n5 - test.i), i - 5, i - (n5 - test.u), j - 5, j - 5 + (int)(strrchr(test.structure, '>') - strchr(test.structure, '>')), test.Loop_D + test.Duplex_El + test.Duplex_Er + test.Loop_E + 4.10 + dE, 
test.Duplex_El, test.Duplex_Er, test.Loop_E, test.Loop_D, dE, test.fullStemEnergy, s5, s4); if (name) { int begin_t, end_t, begin_q, end_q, and, pipe, k; char *psoutput; begin_q = 0; end_q = n4 - 10; begin_t = 0; end_t = n5 - test.i + 1 - 5; and = end_t + 1; pipe = test.u - test.i + 1; cut_point = end_t + 1; char *catseq, *catstruct;/* *fname; */ catseq = (char *)vrna_alloc(n5 + end_q - begin_q + 2); catstruct = (char *)vrna_alloc(n5 + end_q - begin_q + 2); strcpy(catseq, s5); strncpy(catstruct, test.structure, end_t); strcat(catseq, s4); strncat(catstruct, test.structure + end_t + 1, end_q - begin_q + 1); catstruct[end_t - begin_t + end_q - begin_q + 2] = '\0'; catseq[end_t - begin_t + end_q - begin_q + 2] = '\0'; int *relative_access; relative_access = vrna_alloc(sizeof(int) * strlen(s5)); relative_access[0] = access_s1[1][i - (n5 - test.i) + 5]; for (k = 1; k < (int)strlen(s5); k++) relative_access[k] = access_s1[k + 1][i - (n5 - test.i) + k + 5] - access_s1[k][i - (n5 - test.i) + k + 4]; psoutput = vrna_strdup_printf("sno_XS_%d_u_%d_%s.ps", count, i - (n5 - test.u), name); PS_rna_plot_snoop_a(catseq, catstruct, psoutput, relative_access, NULL); free(catseq); free(catstruct); free(relative_access); free(psoutput); count++; } free(s3); free(s4); free(s5); free(test.structure); } } for (i = 0; i <= n3; i++) { free(c_fill[i]); free(r_fill[i]); } free(c_fill); free(r_fill); free(S1_fill); free(S2_fill); free(SS1_fill); free(SS2_fill); delay_free = 0; } PRIVATE char * snoop_backtrack(int i, int j, const char *snoseq, int *Duplex_El, int *Duplex_Er, int *Loop_E, int *Loop_D, int *u, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshD, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2) { /* backtrack structure going backwards from i, and forwards from j * return structure in bracket notation with & as 
separator */ int k, l, type, type2, E, traced, i0, j0; int traced_r = 0; /* flag for following backtrack in c or r */ char *st1, *st2, *struc; char *struc_loop; st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1)); st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1)); int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; type = pair[S1[i]][S2[j]]; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); i0 = i; j0 = j; /** *** if (i<n1) *Duplex_Er += P->dangle3[rtype[type]][SS1[i+1]]; *** if (j>1) *Duplex_Er += P->dangle5[rtype[type]][SS2[j-1]]; *** if (type>2) *Duplex_Er += P->TerminalAU; **/ *Duplex_Er += vrna_E_ext_stem(rtype[type], (j > 1) ? SS2[j - 1] : -1, (i < n1) ? SS1[i + 1] : -1, P); while (i > 0 && j <= n2 - min_d2) { if (!traced_r) { E = r[i][j]; traced = 0; st1[i - 1] = '<'; st2[j - 1] = '>'; type = pair[S1[i]][S2[j]]; if (!type) vrna_message_error("backtrack failed in fold duplex r"); for (k = i - 1; k > 0 && (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { int LE; if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + j) >= ASS) continue; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); if (E == r[k][l] + LE + (i - k + l - j) * penalty) { traced = 1; i = k; j = l; *Duplex_Er += LE; break; } } if (traced) break; } if (!traced) { if (/* pair[S1[i+1]][S2[j-1]] && */ j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem && S1[i - 2] == 4) { int min_k, max_k; max_k = MIN2(n2 - min_s2, j + max_half_stem + 1); min_k = MAX2(j + half_stem + 1, n2 - max_s2); folden *temp; temp = foldlist[j + 1]; while (temp->next) { int k = temp->k; if (pair[S1[i - 3]][S2[k + 1]] /*&& pair[S1[i-4]][S2[k+2]]*/) { /* introduce structure from RNAfold */ if (E == c[i - 3][k + 1] + temp->energy) { *Loop_E = temp->energy; st1[i - 3] = '|'; *u = i - 2; int a, b; /* int fix_ij=indx[k-1+1]+j+1; */ 
for (a = 0; a < MISMATCH; a++) { for (b = 0; b < MISMATCH; b++) { int ij = indx[k - 1 - a + 1] + j + 1 + b; if (cLoop[ij] == temp->energy) { struc_loop = snobacktrack_fold_from_pair(snoseq, j + 1 + b, k - a - 1 + 1); a = INF; b = INF; } } } traced = 1; traced_r = 1; i = i - 3; j = k + 1; break; } } /*else*/ if (pair[S1[i - 4]][S2[k + 1]] /*&& pair[S1[i-5]][S2[k+2]]*/) { /* introduce structure from RNAfold */ if (E == c[i - 4][k + 1] + temp->energy) { *Loop_E = temp->energy; st1[i - 3] = '|'; *u = i - 2; int a, b; /* int fix_ij=indx[k-1+1]+j+1; */ for (a = 0; a < MISMATCH; a++) { for (b = 0; b < MISMATCH; b++) { int ij = indx[k - 1 - a + 1] + j + 1 + b; if (cLoop[ij] == temp->energy) { struc_loop = snobacktrack_fold_from_pair(snoseq, j + 1 + b, k - a - 1 + 1); a = INF; b = INF; } } } traced = 1; traced_r = 1; i = i - 4; j = k + 1; break; } } /* else if */ temp = temp->next; } /* while temp-> next */ } /* test on j */ } /* traced? */ } /* traced_r? */ else { E = c[i][j]; traced = 0; st1[i - 1] = '<'; st2[j - 1] = '>'; type = pair[S1[i]][S2[j]]; if (!type) vrna_message_error("backtrack failed in fold duplex c"); for (k = i - 1; (i - k) < MAXLOOP_L; k--) { for (l = j + 1; l <= n2; l++) { int LE; if (i - k + l - j > 2 * MAXLOOP_L - 2) break; if (abs(i - k - l + j) >= ASS) continue; type2 = pair[S1[k]][S2[l]]; if (!type2) continue; LE = E_IntLoop(i - k - 1, l - j - 1, type2, rtype[type], SS1[k + 1], SS2[l - 1], SS1[i - 1], SS2[j + 1], P); if (E == c[k][l] + LE + (i - k + l - j) * penalty) { traced = 1; i = k; j = l; *Duplex_El += LE; break; } } if (traced) break; } } if (!traced) { int correction; correction = vrna_E_ext_stem(type, (i > 1) ? SS1[i - 1] : -1, (j < n2) ? 
SS2[j + 1] : -1, P); E -= correction; *Duplex_El += correction; /** *** if (i>1) {E -= P->dangle5[type][SS1[i-1]]; *Duplex_El +=P->dangle5[type][SS1[i-1]];} *** if (j<n2) {E -= P->dangle3[type][SS2[j+1]]; *Duplex_El +=P->dangle3[type][SS2[j+1]];} *** if (type>2) {E -= P->TerminalAU; *Duplex_El +=P->TerminalAU;} **/ if (E != P->DuplexInit) vrna_message_error("backtrack failed in fold duplex end"); else break; } } /* if (i>1) i--; */ /* if (j<n2) j++; */ /* struc = (char *) vrna_alloc(i0-i+1+j-j0+1+2); */ /* declare final duplex structure */ struc = (char *)vrna_alloc(i0 - i + 1 + n2 - 1 + 1 + 2); /* declare final duplex structure */ char *struc2; struc2 = (char *)vrna_alloc(n2 + 1); /* char * struct_const; */ for (k = MAX2(i, 1); k <= i0; k++) if (!st1[k - 1]) st1[k - 1] = '.'; /* for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = struc_loop[k-1];*/ /* '.'; normal */ /* char * struct_const; */ /* struct_const = (char *) vrna_alloc(sizeof(char)*(n2+1)); */ for (k = 1; k <= n2; k++) { if (!st2[k - 1]) st2[k - 1] = struc_loop[k - 1]; /* '.'; */ struc2[k - 1] = st2[k - 1]; /* '.'; */ /* if (k>=j0 && k<=j){ */ /* struct_const[k-1]='x'; */ /* } */ /* else{ */ /* if(k<j0) {struct_const[k-1]='<';} */ /* if(k>j) {struct_const[k-1]='>';} */ /* } */ } char duplexseq_1[j0]; char duplexseq_2[n2 - j + 2]; if (j < n2) { strncpy(duplexseq_1, snoseq, j0 - 1); strcpy(duplexseq_2, snoseq + j); duplexseq_1[j0 - 1] = '\0'; duplexseq_2[n2 - j + 1] = '\0'; duplexT temp; temp = duplexfold(duplexseq_1, duplexseq_2); *Loop_D = MIN2(0, -410 + (int)100 * temp.energy); if (*Loop_D) { int l1, ibegin, iend, jbegin, jend; l1 = strchr(temp.structure, '&') - temp.structure; ibegin = temp.i - l1; iend = temp.i - 1; jbegin = temp.j; jend = temp.j + (int)strlen(temp.structure) - l1 - 2 - 1; for (k = ibegin + 1; k <= iend + 1; k++) struc2[k - 1] = temp.structure[k - ibegin - 1]; for (k = jbegin + j; k <= jend + j; k++) struc2[k - 1] = temp.structure[l1 + k - j - jbegin + 1]; } free(temp.structure); } 
strcpy(struc, st1 + MAX2(i - 1, 0)); strcat(struc, "&"); /* strcat(struc, st2); */ strncat(struc, struc2 + 5, (int)strlen(struc2) - 10); free(struc2); free(struc_loop); free(st1); free(st2); /* free_arrays(); */ return struc; } void Lsnoop_subopt_list_XS(const char *s1, const char *s2, const int **access_s1, int delta, int w, const int penalty, const int threshloop, const int threshLE, const int threshRE, const int threshDE, const int threshTE, const int threshSE, const int threshD, const int distance, const int half_stem, const int max_half_stem, const int min_s2, const int max_s2, const int min_s1, const int max_s1, const int min_d1, const int min_d2, const int alignment_length, const char *name, const int fullStemEnergy) { int min_colonne = INF; int max_pos; int max; max = INF; /* int temp; */ /* int nsubopt=10; */ n1 = (int)strlen(s1); n2 = (int)strlen(s2); int *position; int *position_j; int min_j_colonne; int max_pos_j = INF; position = (int *)vrna_alloc((n1 + 3) * sizeof(int)); position_j = (int *)vrna_alloc((n1 + 3) * sizeof(int)); /* int Eminj, Emin_l; */ int i, j;/* l1, Emin=INF, i_min=0, j_min=0; */ /* char *struc; */ /* snoopT mfe; */ int *indx; int *mLoop; int *cLoop; folden **foldlist, **foldlist_XS; int Duplex_El, Duplex_Er; int Loop_D; /* int u; */ int Loop_E; vrna_md_t md; Duplex_El = 0; Duplex_Er = 0; Loop_E = 0, Loop_D = 0; snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS); set_model_details(&md); if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) { snoupdate_fold_params(); if (P) free(P); P = vrna_params(&md); make_pair_matrix(); } lpair = (int **)vrna_alloc(sizeof(int *) * (6)); lc = (int **)vrna_alloc(sizeof(int *) * (6)); lr = (int **)vrna_alloc(sizeof(int *) * (6)); for (i = 0; i < 6; i++) { lc[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); lr[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); lpair[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1)); for (j = n2; j > -1; j--) { lc[i][j] = INF; lr[i][j] = INF; lpair[i][j] 
= 0; } } encode_seqs(s1, s2); int lim_maxj = n2 - min_d2; int lim_minj = min_d1; int lim_maxi = n1 - 5; for (i = 5; i <= lim_maxi; i++) { int idx = i % 5; int idx_1 = (i - 1) % 5; int idx_2 = (i - 2) % 5; int idx_3 = (i - 3) % 5; int idx_4 = (i - 4) % 5; int di1, di2, di3, di4; di1 = access_s1[5][i] - access_s1[4][i - 1]; di2 = access_s1[5][i - 1] - access_s1[4][i - 2] + di1; di3 = access_s1[5][i - 2] - access_s1[4][i - 3] + di2; di4 = access_s1[5][i - 3] - access_s1[4][i - 4] + di3; di1 = MIN2(di1, 165); di2 = MIN2(di2, 330); di3 = MIN2(di3, 495); di4 = MIN2(di4, 660); for (j = lim_maxj; j > lim_minj; j--) { int type, type2;/* E, k,l; */ type = pair[S1[i]][S2[j]]; lpair[idx][j] = type; lc[idx][j] = (type) ? P->DuplexInit + access_s1[1][i] : INF; lr[idx][j] = INF; if (!type) continue; if ( /*pair[S1[i+1]][S2[j-1]] && Be sure it binds*/ j < max_s1 && j > min_s1 && j > n2 - max_s2 - max_half_stem && j < n2 - min_s2 - half_stem && S1[i - 2] == 4) { /*constraint on s2 and i*/ int min_k, max_k; max_k = MIN2(n2 - min_s2, j + max_half_stem + 1); min_k = MAX2(j + half_stem + 1, n2 - max_s2); folden *temp; temp = foldlist[j + 1]; while (temp->next) { int k = temp->k; /* if(k >= min_k-1 && k < max_k){ comment to recover normal behaviour */ if (lpair[idx_3][k + 1] && lc[idx_3][k + 1] /*+di3*/ < 411 /*&& lpair[idx_4][k+2]*/) /* remove second condition */ lr[idx][j] = MIN2(lr[idx][j], di3 + lc[idx_3][k + 1] + temp->energy); /*--NU--*/ /*else*/ if (lpair[idx_4][k + 1] && /*di4 +*/ lc[idx_4][k + 1] < 411) /*--NUN--*/ /* remove second condition */ lr[idx][j] = MIN2(lr[idx][j], di4 + lc[idx_4][k + 1] + temp->energy); /* } */ temp = temp->next; } } /* dangle 5'SIDE relative to the mRNA */ /** *** lc[idx][j] += P->dangle5[type][SS1[i-1]]; *** lc[idx][j] += P->dangle3[type][SS2[j+1]]; *** if (type>2) lc[idx][j] += P->TerminalAU; **/ lc[idx][j] += vrna_E_ext_stem(type, SS1[i - 1], SS2[j + 1], P); /* if(j<n2 && i>1){ */ /* type2=pair[S1[i-1]][S2[j+1]]; */ type2 = lpair[idx_1][j + 1]; if 
(type2 > 0) { lc[idx][j] = MIN2(lc[idx_1][j + 1] + E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1], P) + di1, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_1][j + 1] + E_IntLoop(0, 0, type2, rtype[type], SS1[i], SS2[j], SS1[i - 1], SS2[j + 1], P) + di1, lr[idx][j]); } type2 = lpair[idx_2][j + 2]; if (type2 > 0) { lc[idx][j] = MIN2(lc[idx_2][j + 2] + E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1], P) + di2, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_2][j + 2] + E_IntLoop(1, 1, type2, rtype[type], SS1[i - 1], SS2[j + 1], SS1[i - 1], SS2[j + 1], P) + di2, lr[idx][j]); } type2 = lpair[idx_3][j + 3]; if (type2 > 0) { lc[idx][j] = MIN2(lc[idx_3][j + 3] + E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1], P) + di3, lc[idx][j]); lr[idx][j] = MIN2(lr[idx_3][j + 3] + E_IntLoop(2, 2, type2, rtype[type], SS1[i - 2], SS2[j + 2], SS1[i - 1], SS2[j + 1], P) + di3, lr[idx][j]); } int bla; int temp2; temp2 = min_colonne; bla = lr[idx][j] + vrna_E_ext_stem(rtype[type], SS2[j - 1], SS1[i + 1], P); /** *** (type>2?P->TerminalAU:0)+P->dangle3[rtype[type]][SS1[i+1]]+P->dangle5[rtype[type]][SS2[j-1]]; **/ min_colonne = MIN2(bla, min_colonne); if (temp2 > min_colonne) min_j_colonne = j; } position[i] = min_colonne; if (max >= min_colonne) { max = min_colonne; max_pos = i; max_pos_j = min_j_colonne; } position_j[i] = min_j_colonne; min_colonne = INF; } free(S1); free(S2); free(SS1); free(SS2); if (max < threshTE + 30 * alignment_length) { find_max_snoop_XS(s1, s2, access_s1, max, alignment_length, position, position_j, delta, distance, penalty, threshloop, threshLE, threshRE, threshDE, threshTE, threshSE, threshD, half_stem, max_half_stem, min_s2, max_s2, min_s1, max_s1, min_d1, min_d2, name, fullStemEnergy); } for (i = 1; i < 6; i++) { free(lc[i]); free(lr[i]); free(lpair[i]); } free(lc[0]); free(lr[0]); free(lpair[0]); free(lc); free(lr); free(lpair); free(position); free(position_j); } PRIVATE void 
/*
 * Scan the per-position minimum-energy profile (position[]/position_j[])
 * for candidate snoRNA target sites below a score threshold, refold each
 * candidate window with snoopfold_XS(), filter the result against the
 * per-component energy thresholds, print accepted hits, and (if 'name'
 * is given) emit a PostScript structure plot per hit.
 * NOTE(review): the PRIVATE void return type is emitted on the preceding
 * source line; this block begins at the parameter list.
 */
find_max_snoop_XS(const char *s1, const char *s2, const int **access_s1,
                  const int max, const int alignment_length,
                  const int *position, const int *position_j,
                  const int delta, const int distance, const int penalty,
                  const int threshloop, const int threshLE, const int threshRE,
                  const int threshDE, const int threshTE, const int threshSE,
                  const int threshD, const int half_stem,
                  const int max_half_stem, const int min_s2, const int max_s2,
                  const int min_s1, const int max_s1, const int min_d1,
                  const int min_d2, const char *name,
                  const int fullStemEnergy)
{
  int count = 0;
  int n3 = (int)strlen(s1);
  int n4 = (int)strlen(s2);
  int pos = n1 - 4;             /* n1 is a file-scope global set by the caller */
  int max_pos_j;
  /* thresholds are in 1/100 kcal/mol units throughout */
  int threshold = MIN2(threshTE + alignment_length * 30, -100);

  /* printf("threshTE %d max %d\n", threshTE, max); */
  /* #pragma omp parallel for */
  /* for(pos=n1+1;pos>distance;pos--){ */
  while (pos-- > 5) {
    int temp_min = 0;
    if (position[pos] < (threshold)) {
      int search_range;
      search_range = distance + 1;
      /* within the look-back window, jump to the lowest-scoring position */
      while (--search_range)
        if (position[pos - search_range] <= position[pos - temp_min])
          temp_min = search_range;
      pos -= temp_min;
      max_pos_j = position_j[pos];
      int begin = MAX2(5, pos - alignment_length);
      int end = MIN2(n3 - 5, pos - 1);
      char *s3 = (char *)vrna_alloc(sizeof(char) * (end - begin + 2) + 5);
      /* vrna_alloc zero-fills, so s3 remains NUL-terminated after strncpy */
      strncpy(s3, (s1 + begin), end - begin + 1);
      strcat(s3, "NNNNN\0");
      int n5 = (int)strlen(s3);
      snoopT test;
      test = snoopfold_XS(s3, s2, access_s1, pos, max_pos_j, penalty,
                          threshloop, threshLE, threshRE, threshDE, threshD,
                          half_stem, max_half_stem, min_s2, max_s2, min_s1,
                          max_s1, min_d1, min_d2, fullStemEnergy);
      if (test.energy == INF) {
        /* no structure found for this window */
        free(s3);
        continue;
      }

      /* reject hits violating any per-component threshold
       * (test.* fields are in kcal/mol, thresh* in 1/100 kcal/mol) */
      if (test.Duplex_El > threshLE * 0.01 || test.Duplex_Er > threshRE * 0.01 ||
          test.Loop_D > threshD * 0.01 ||
          (test.Duplex_Er + test.Duplex_El) > threshDE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E) > threshTE * 0.01 ||
          (test.Duplex_Er + test.Duplex_El + test.Loop_E + test.Loop_D + 410) > threshSE * 0.01) {
        free(test.structure);
        free(s3);
        continue;
      }

      char *s4;
      /* s4: snoRNA sequence with the 5-base padding stripped from both ends */
      s4 = (char *)vrna_alloc(sizeof(char) * (n4 - 9));
      strncpy(s4, s2 + 5, n4 - 10);
      s4[n4 - 10] = '\0';
      /* s5: target subsequence actually engaged in the interaction */
      char *s5 = vrna_alloc(sizeof(char) * n5 - test.i + 2 - 5);
      strncpy(s5, s3 + test.i - 1, n5 - test.i + 1 - 5);
      s5[n5 - test.i + 1 - 5] = '\0';
      /* opening-energy penalty for the interaction site, kcal/mol */
      float dE = ((float)(access_s1[n5 - test.i + 1 - 5][pos])) * 0.01;
      printf(
        "%s %3d,%-3d;%3d : %3d,%-3d (%5.2f = %5.2f + %5.2f + %5.2f + %5.2f + %5.2f + 4.10) (%5.2f)\n%s&%s\n",
        test.structure, pos - (n5 - test.i), pos - 5, pos - (n5 - test.u),
        max_pos_j - 5,
        max_pos_j - 5 + (int)(strrchr(test.structure, '>') - strchr(test.structure, '>')),
        test.Loop_D + test.Duplex_El + test.Duplex_Er + test.Loop_E + 4.10 + dE,
        test.Duplex_El, test.Duplex_Er, test.Loop_E, test.Loop_D, dE,
        test.fullStemEnergy, s5, s4);
      if (name) {
        /* build concatenated sequence/structure and plot it */
        int begin_t, end_t, begin_q, end_q, and, pipe, i;
        char *psoutput;
        begin_q = 0;
        end_q = n4 - 10;
        begin_t = 0;
        end_t = n5 - test.i + 1 - 5;
        and = end_t + 1;        /* NOTE(review): 'and' and 'pipe' are assigned but unused below */
        pipe = test.u - test.i + 1;
        cut_point = end_t + 1;  /* file-scope cut point between target and snoRNA */
        char *catseq, *catstruct;/* *fname; */
        catseq = (char *)vrna_alloc(n5 + end_q - begin_q + 2);
        catstruct = (char *)vrna_alloc(n5 + end_q - begin_q + 2);
        strcpy(catseq, s5);
        strncpy(catstruct, test.structure, end_t);
        strcat(catseq, s4);
        strncat(catstruct, test.structure + end_t + 1, end_q - begin_q + 1);
        catstruct[end_t - begin_t + end_q - begin_q + 2] = '\0';
        catseq[end_t - begin_t + end_q - begin_q + 2] = '\0';
        /* per-nucleotide accessibility differences for the plot annotation */
        int *relative_access;
        relative_access = vrna_alloc(sizeof(int) * strlen(s5));
        relative_access[0] = access_s1[1][pos - (n5 - test.i) + 5];
        for (i = 1; i < (int)strlen(s5); i++)
          relative_access[i] = access_s1[i + 1][pos - (n5 - test.i) + i + 5] -
                               access_s1[i][pos - (n5 - test.i) + i + 4];
        psoutput = vrna_strdup_printf("sno_XS_%d_u_%d_%s.ps", count,
                                      pos - (n5 - test.u), name);
        PS_rna_plot_snoop_a(catseq, catstruct, psoutput, relative_access, NULL);
        free(catseq);
        free(catstruct);
        free(relative_access);
        free(psoutput);
        count++;
      }

      free(s3);
      free(s4);
      free(s5);
      free(test.structure);
    }
  }
}


snoopT
/*
 * Fold one candidate target window (s1) against the snoRNA (s2) with a
 * duplex dynamic program over the global matrices c[][] (closed, with stem
 * insertion) and r[][] (open), then backtrack the minimum-energy structure.
 * Returns a snoopT with energy components and the bracket structure;
 * mfe.energy == INF signals that no structure satisfied the constraints.
 * NOTE(review): the snoopT return type is emitted on the preceding source
 * line; this block begins at the parameter list and its tail (mfe field
 * assignments) continues on the following line.
 */
snoopfold_XS(const char *s1, const char *s2, const int **access_s1,
             const int pos_i, const int pos_j, const int penalty,
             const int threshloop, const int threshLE, const int threshRE,
             const int threshDE, const int threshD, const int half_stem,
             const int max_half_stem, const int min_s2, const int max_s2,
             const int min_s1, const int max_s1, const int min_d1,
             const int min_d2, const int fullStemEnergy)
{
  /* int Eminj, Emin_l; */
  int a, b, i, j, Emin = INF, a_min = 0, b_min = 0;
  char *struc;
  snoopT mfe;
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  int Duplex_El, Duplex_Er;
  int Loop_D;
  int u;
  int Loop_E;
  vrna_md_t md;

  Duplex_El = 0;
  Duplex_Er = 0;
  Loop_E = 0, Loop_D = 0;
  /* fetch the precomputed snoRNA stem fold arrays from the fold module */
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  n1 = (int)strlen(s1);
  n2 = (int)strlen(s2);
  set_model_details(&md);
  /* (re)build energy parameters if temperature changed or none exist yet */
  if ((!P) || (fabs(P->temperature - temperature) > 1e-6)) {
    snoupdate_fold_params();
    if (P)
      free(P);
    P = vrna_params(&md);
    make_pair_matrix();
  }

  c = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
  r = (int **)vrna_alloc(sizeof(int *) * (n1 + 1));
  for (i = 0; i <= n1; i++) {
    c[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    r[i] = (int *)vrna_alloc(sizeof(int) * (n2 + 1));
    for (j = n2; j > -1; j--) {
      c[i][j] = INF;
      r[i][j] = INF;
    }
  }
  encode_seqs(s1, s2);
  /* anchor the recursion at the fixed pair (n1-5, pos_j) */
  i = n1 - 5;
  j = pos_j;
  /* printf("tar: %s\nsno: %s\n ", s1, s2); */
  /* printf("pos_i %d pos_j %d\n", pos_i, pos_j); */
  /* printf("type %d n1 %d n2 %d S1[n1] %d S2[n2] %d", pair[S1[i]][S2[j]], n1, n2, S1[n1], S2[n2]); */
  int type, type2, E, p, q;
  r[i][j] = P->DuplexInit;
  /* r[i][j] += P->dangle3[rtype[type]][SS1[i+1]] + P->dangle5[rtype[type]][SS2[j-1]]; */
  if (pair[S1[i]][S2[j]] > 2)
    r[i][j] += P->TerminalAU;

  /* fill c/r for all (a,b) left of the anchor, extending the duplex */
  for (a = i - 1; a > 0; a--) { /* i-1 */
    r[a + 1][0] = INF;
    for (b = j + 1; b <= n2 - min_d2; b++) { /* j+1 */
      r[a][b] = INF;
      type = pair[S1[a]][S2[b]];
      if (!type)
        continue;
      /* S1[x]==4 encodes 'U': stem insertion is allowed after a U */
      if (S1[a + 1] == 4) {
        folden *temp;
        temp = foldlist_XS[b - 1];
        while (temp->next) {
          int k = temp->k;
          if (pair[S1[a + 3]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
              k > n2 - max_s2 - max_half_stem && k < n2 - min_s2 - half_stem
              /*&& r[a+3][k-1] + access_s1[i-(a+3)+1][pos_i] < 411*/)
            /* remove last condition last condition is to check if the interaction is stable enough */
            c[a][b] = MIN2(c[a][b], r[a + 3][k - 1] + temp->energy);
          temp = temp->next;
        }
      }

      if (S1[a + 2] == 4) {
        folden *temp;
        temp = foldlist_XS[b - 1];
        while (temp->next) {
          int k = temp->k;
          if (pair[S1[a + 4]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
              k > n2 - max_s2 - max_half_stem && k < n2 - min_s2 - half_stem
              /*&& r[a+4][k-1] + access_s1[i-(a+4)+1][pos_i] < 411 */)
            /* remove last condition */
            c[a][b] = MIN2(c[a][b], r[a + 4][k - 1] + temp->energy);
          temp = temp->next;
        }
      }

      /* extend via an interior loop closed by (p,q) */
      for (p = a + 1; p < n1 && (p - a) < MAXLOOP_L; p++) { /* p < n1 */
        for (q = b - 1; q > 1; q--) { /* q > 1 */
          if (p - a + b - q > 2 * MAXLOOP_L - 2)
            break;
          if (abs((p - a) - (b - q)) >= ASS)
            continue;
          type2 = pair[S1[p]][S2[q]];
          if (!type2)
            continue;
          E = E_IntLoop(p - a - 1, b - q - 1, type2, rtype[type], SS1[a + 1],
                        SS2[b - 1], SS1[p - 1], SS2[q + 1], P);
          c[a][b] = MIN2(c[a][b], c[p][q] + E);
          r[a][b] = MIN2(r[a][b], r[p][q] + E);
        }
      }
      E = c[a][b];
      if (type > 2)
        E += P->TerminalAU;
      /* E +=P->dangle5[rtype[type]][SS1[i+1]]; */
      /* E +=P->dangle5[rtype[type]][SS2[j-1]]; */
      /* add the target-accessibility opening penalty */
      E += access_s1[i - a + 1][pos_i];
      if (E < Emin) {
        Emin = E;
        a_min = a;
        b_min = b;
      }
    }
  }

  if (Emin > 0) {
    /* nothing better than the open state: clean up and signal failure */
    printf("no target found under the constraints chosen\n");
    for (i = 0; i <= n1; i++) {
      free(r[i]);
      free(c[i]);
    }
    free(c);
    free(r);
    free(S1);
    free(S2);
    free(SS1);
    free(SS2);
    mfe.energy = INF;
    return mfe;
  }

  type2 = pair[S1[a_min]][S2[b_min]];
  if (type2 > 2)
    Emin += P->TerminalAU;
  mfe.energy = ((float)(Emin)) / 100;
  struc = snoop_backtrack_XS(a_min, b_min, s2, &Duplex_El, &Duplex_Er,
                             &Loop_E, &Loop_D, &u, penalty, threshloop,
                             threshLE, threshRE, threshDE, threshD, half_stem,
                             max_half_stem, min_s2, max_s2, min_s1, max_s1,
                             min_d1, min_d2);
  mfe.i = a_min;
  mfe.j = b_min;
  mfe.u = u;
  /* tail of snoopfold_XS: convert component energies from 1/100 kcal/mol
   * to kcal/mol and hand the structure string over to the caller (which
   * owns and must free mfe.structure) */
  mfe.Duplex_Er = (float)Duplex_Er / 100;
  mfe.Duplex_El = (float)Duplex_El / 100;
  mfe.Loop_D = (float)Loop_D / 100;
  mfe.Loop_E = (float)Loop_E / 100;
  mfe.energy = (float)Emin / 100;
  mfe.fullStemEnergy = (float)fullStemEnergy / 100;
  mfe.structure = struc;
  return mfe;
}


/*
 * Backtrack the duplex structure found by snoopfold_XS starting at pair
 * (i,j), accumulating the left/right duplex energies and the stem-loop
 * energy into the output parameters.
 */
PRIVATE char *
snoop_backtrack_XS(int i, int j, const char *snoseq, int *Duplex_El,
                   int *Duplex_Er, int *Loop_E, int *Loop_D, int *u,
                   const int penalty, const int threshloop,
                   const int threshLE, const int threshRE,
                   const int threshDE, const int threshD,
                   const int half_stem, const int max_half_stem,
                   const int min_s2, const int max_s2, const int min_s1,
                   const int max_s1, const int min_d1, const int min_d2)
{
  /* backtrack structure going backwards from i, and forwards from j
   * return structure in bracket notation with & as separator */
  int k, l, type, type2, E, traced, i0, j0;
  int traced_c = 0; /* flag for following backtrack in c or r */
  char *st1, *st2, *struc;
  char *struc_loop;

  st1 = (char *)vrna_alloc(sizeof(char) * (n1 + 1));
  st2 = (char *)vrna_alloc(sizeof(char) * (n2 + 1));
  int *indx;
  int *mLoop;
  int *cLoop;
  folden **foldlist, **foldlist_XS;
  type = pair[S1[i]][S2[j]];
  snoexport_fold_arrays(&indx, &mLoop, &cLoop, &foldlist, &foldlist_XS);
  i0 = i;
  j0 = j;
  /* i0=MAX2(i,1); j0=MIN2(j+1,n2); */
  while (i <= n1 && j >= 1) {
    if (!traced_c) {
      /* retrace from the 'closed' matrix c */
      E = c[i][j];
      traced = 0;
      st1[i] = '<';
      st2[j - 1] = '>';
      type = pair[S1[i]][S2[j]];
      if (!type)
        vrna_message_error("backtrack failed in fold duplex c");
      /* try to retrace an interior-loop extension to (k,l) */
      for (k = i + 1; k > 0 && (k - i) < MAXLOOP_L; k++) {
        for (l = j - 1; l >= 1; l--) {
          int LE;
          if (k - i + j - l > 2 * MAXLOOP_L - 2)
            break;
          if (abs(k - i - j + l) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          LE = E_IntLoop(k - i - 1, j - l - 1, type2, rtype[type],
                         SS1[i + 1], SS2[j - 1], SS1[k - 1], SS2[l + 1], P);
          if (E == c[k][l] + LE) {
            traced = 1;
            i = k;
            j = l;
            *Duplex_El += LE;
            break;
          }
        }
        if (traced)
          break;
      }
      if (!traced) {
        /* no loop extension matched: retrace through a stem insertion
         * (S1[x]==4 encodes a U at the insertion point) */
        if (S1[i + 1] == 4) {
          folden *temp;
          temp = foldlist_XS[j - 1];
          while (temp->next) {
            int k = temp->k;
            if (pair[S1[i + 3]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
                k > n2 - max_s2 - max_half_stem &&
                k < n2 - min_s2 - half_stem) {
              if (E == r[i + 3][k - 1] + temp->energy) {
                *Loop_E = temp->energy;
                st1[i + 1] = '|';
                st1[i + 2] = '.';
                *u = i + 1;
                int a, b;
                for (a = 0; a < MISMATCH; a++) {
                  for (b = 0; b < MISMATCH; b++) {
                    int ij = indx[j - 1 - a] + k + b;
                    if (cLoop[ij] == temp->energy) {
                      struc_loop = snobacktrack_fold_from_pair(snoseq, k + b, j - 1 - a);
                      /* a = b = INF terminates both mismatch loops */
                      a = INF;
                      b = INF;
                    }
                  }
                }
                traced = 1;
                traced_c = 1; /* continue the backtrack in r */
                i = i + 3;
                j = k - 1;
                break;
              }
            }
            temp = temp->next;
          }
        }

        if (S1[i + 2] == 4) {
          /* introduce structure from RNAfold */
          folden *temp;
          temp = foldlist_XS[j - 1];
          while (temp->next) {
            int k = temp->k;
            if (pair[S1[i + 4]][S2[k - 1]] && k < max_s1 && k > min_s1 &&
                k > n2 - max_s2 - max_half_stem &&
                k < n2 - min_s2 - half_stem) {
              if (E == r[i + 4][k - 1] + temp->energy) {
                *Loop_E = temp->energy;
                st1[i + 2] = '|';
                st1[i + 1] = st1[i + 3] = '.';
                *u = i + 2;
                int a, b;
                for (a = 0; a < MISMATCH; a++) {
                  for (b = 0; b < MISMATCH; b++) {
                    int ij = indx[j - 1 - a] + k + b;
                    if (cLoop[ij] == temp->energy) {
                      struc_loop = snobacktrack_fold_from_pair(snoseq, k + b, j - a - 1);
                      a = INF;
                      b = INF;
                    }
                  }
                }
                traced = 1;
                traced_c = 1;
                i = i + 4;
                j = k - 1;
                break;
              }
            }
            temp = temp->next;
          }
        }
      } /* traced? */
    } /* traced_r? */
    else {
      /* retrace from the 'open' matrix r (after the stem insertion) */
      E = r[i][j];
      traced = 0;
      st1[i] = '<';
      st2[j - 1] = '>';
      type = pair[S1[i]][S2[j]];
      if (!type)
        vrna_message_error("backtrack failed in fold duplex r");
      for (k = i + 1; k > 0 && (k - i) < MAXLOOP_L; k++) {
        for (l = j - 1; l >= 1; l--) {
          int LE;
          if (k - i + j - l > 2 * MAXLOOP_L - 2)
            break;
          if (abs(k - i - j + l) >= ASS)
            continue;
          type2 = pair[S1[k]][S2[l]];
          if (!type2)
            continue;
          LE = E_IntLoop(k - i - 1, j - l - 1, type2, rtype[type],
                         SS1[i + 1], SS2[j - 1], SS1[k - 1], SS2[l + 1], P);
          if (E == r[k][l] + LE) {
            traced = 1;
            i = k;
            j = l;
            *Duplex_Er += LE;
            break;
          }
        }
        if (traced)
          break;
      }
    }
    if (!traced) {
      /* if (i>1) {E -= P->dangle5[type][SS1[i-1]]; *Duplex_El +=P->dangle5[type][SS1[i-1]];} */
      /* if (j<n2) {E -= P->dangle3[type][SS2[j+1]]; *Duplex_El +=P->dangle3[type][SS2[j+1]];} */
      if (type > 2) {
        E -= P->TerminalAU;
        *Duplex_Er += P->TerminalAU;
      }
      if (E != P->DuplexInit)
        vrna_message_error("backtrack failed in fold duplex end");
      else
        break; /* reached the duplex initiation: backtrack is complete */
    }
  }
  /* struc = (char *) vrna_alloc(i0-i+1+j-j0+1+2); */ /* declare final duplex structure */
  struc = (char *)vrna_alloc(i - i0 + 1 + n2); /* declare final duplex structure */
  char *struc2;
  struc2 = (char *)vrna_alloc(n2 + 1);
  /* char * struct_const; */
  for (k = MIN2(i0, 1); k <= i; k++)
    if (!st1[k - 1])
      st1[k - 1] = '.';
  /* for (k=j0; k<=j; k++) if (!st2[k-1]) st2[k-1] = struc_loop[k-1];*/ /* '.'; normal */
  /* char * struct_const; */
  /* struct_const = (char *) vrna_alloc(sizeof(char)*(n2+1)); */
  /* merge the stem-loop structure into unassigned snoRNA positions.
   * NOTE(review): struc_loop is only assigned on a stem-insertion trace;
   * presumably every accepted structure has one — verify against callers. */
  for (k = 1; k <= n2; k++) {
    if (!st2[k - 1])
      st2[k - 1] = struc_loop[k - 1]; /* '.'; */
    struc2[k - 1] = st2[k - 1]; /* '.'; */
    /* if (k>=j0 && k<=j){ */
    /* struct_const[k-1]='x'; */
    /* } */
    /* else{ */
    /* if(k<j0) {struct_const[k-1]='<';} */
    /* if(k>j) {struct_const[k-1]='>';} */
    /* } */
  }
  /* fold the two snoRNA flanks against each other to detect a closing
   * duplex (Loop_D contribution) */
  char duplexseq_1[j];
  char duplexseq_2[n2 - j0 + 2];
  if (j0 < n2) {
    strncpy(duplexseq_1, snoseq, j - 1);
    strcpy(duplexseq_2, snoseq + j0);
    duplexseq_1[j - 1] = '\0';
    duplexseq_2[n2 - j0 + 1] = '\0';
    duplexT temp;
    temp = duplexfold(duplexseq_1, duplexseq_2);
    *Loop_D = MIN2(0, -410 + (int)100 * temp.energy);
    if (*Loop_D) {
      int l1, ibegin, iend, jbegin, jend;
      l1 = strchr(temp.structure, '&') - temp.structure;
      ibegin = temp.i - l1;
      iend = temp.i - 1;
      jbegin = temp.j;
      jend = temp.j + (int)strlen(temp.structure) - l1 - 2 - 1;
      for (k = ibegin + 1; k <= iend + 1; k++)
        struc2[k - 1] = temp.structure[k - ibegin - 1];
      for (k = jbegin + j0; k <= jend + j0; k++)
        struc2[k - 1] = temp.structure[l1 + k - j0 - jbegin + 1];
    }
    free(temp.structure);
  }
  /* assemble "target&snoRNA" bracket string, trimming 5-base paddings */
  strcpy(struc, st1 + MAX2(i0, 1));
  strcat(struc, "&");
  /* strcat(struc, st2); */
  strncat(struc, struc2 + 5, (int)strlen(struc2) - 10);
  free(struc2);
  free(struc_loop);
  free(st1);
  free(st2);
  for (i = 0; i <= n1; i++) {
    free(r[i]);
    free(c[i]);
  }
  free(c);
  free(r);
  free(S1);
  free(S2);
  free(SS1);
  free(SS2);
  /* free_arrays(); */
  return struc;
}


PRIVATE int
covscore(const int *types, int n_seq)
{
  /* calculate co-variance bonus for a pair depending on */
  /* compensatory/consistent mutations and incompatible seqs */
  /* should be 0 for conserved pairs, >0 for good pairs */
#define NONE -10000 /* score for forbidden pairs */
  int k, l, s, score, pscore;
  int dm[7][7] = {
    { 0, 0, 0, 0, 0, 0, 0 }, /* hamming distance between pairs */
    { 0, 0, 2, 2, 1, 2, 2 } /* CG */,
    { 0, 2, 0, 1, 2, 2, 2 } /* GC */,
    { 0, 2, 1, 0, 2, 1, 2 } /* GU */,
    { 0, 1, 2, 2, 0, 2, 1 } /* UG */,
    { 0, 2, 2, 1, 2, 0, 2 } /* AU */,
    { 0, 2, 2, 2, 1, 2, 0 } /* UA */
  };
  int pfreq[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

  for (s = 0; s < n_seq; s++)
    pfreq[types[s]]++;
  /* more than half the sequences cannot pair here: forbid the pair */
  if (pfreq[0] * 2 > n_seq)
    return NONE;

  for (k = 1, score = 0; k <= 6; k++) /* ignore pairtype 7 (gap-gap) */
    for (l = k + 1; l <= 6; l++)
      /* scores for replacements between pairtypes */
      /* consistent or compensatory mutations score 1 or 2 */
      score += pfreq[k] * pfreq[l] * dm[k][l];
  /* counter examples score -1, gap-gap scores -0.25 */
  pscore = cv_fact * ((UNIT * score) / n_seq - nc_fact * UNIT * (pfreq[0] + pfreq[7] * 0.25));
  return pscore;
}
/*---------------------------------------------------------------------------*/ PRIVATE short * aliencode_seq(const char *sequence) { unsigned int i, l; short *Stemp; l = strlen(sequence); Stemp = (short *)vrna_alloc(sizeof(short) * (l + 2)); Stemp[0] = (short)l; /* make numerical encoding of sequence */ for (i = 1; i <= l; i++) Stemp[i] = (short)encode_char(toupper(sequence[i - 1])); /* for circular folding add first base at position n+1 */ /* Stemp[l+1] = Stemp[1]; */ return Stemp; } PRIVATE short * encode_seq(const char *sequence) { unsigned int i, l; short *S; l = strlen(sequence); extern double nc_fact; S = (short *)vrna_alloc(sizeof(short) * (l + 2)); S[0] = (short)l; /* make numerical encoding of sequence */ for (i = 1; i <= l; i++) S[i] = (short)encode_char(toupper(sequence[i - 1])); /* for circular folding add first base at position n+1 */ S[l + 1] = S[1]; return S; } PRIVATE void encode_seqs(const char *s1, const char *s2) { unsigned int i, l; l = strlen(s1); S1 = encode_seq(s1); SS1 = (short *)vrna_alloc(sizeof(short) * (l + 1)); /* SS1 exists only for the special X K and I bases and energy_set!=0 */ for (i = 1; i <= l; i++) /* make numerical encoding of sequence */ SS1[i] = alias[S1[i]]; /* for mismatches of nostandard bases */ l = strlen(s2); S2 = encode_seq(s2); SS2 = (short *)vrna_alloc(sizeof(short) * (l + 1)); /* SS2 exists only for the special X K and I bases and energy_set!=0 */ for (i = 1; i <= l; i++) /* make numerical encoding of sequence */ SS2[i] = alias[S2[i]]; /* for mismatches of nostandard bases */ } PRIVATE int compare(const void *sub1, const void *sub2) { int d; if (((snoopT *)sub1)->energy > ((snoopT *)sub2)->energy) return 1; if (((snoopT *)sub1)->energy < ((snoopT *)sub2)->energy) return -1; d = ((snoopT *)sub1)->i - ((snoopT *)sub2)->i; if (d != 0) return d; return ((snoopT *)sub1)->j - ((snoopT *)sub2)->j; }
patch-dcraw__indi.c
$NetBSD: patch-dcraw__indi.c,v 1.2 2020/06/01 00:14:21 gdt Exp $ Drop default(none), because the OpenMP specification is unstable across versions; some versions prohibit declaring constants as shared and some require it. --- dcraw_indi.c.orig 2015-06-16 03:58:38.000000000 +0000 +++ dcraw_indi.c @@ -142,7 +142,6 @@ void CLASS wavelet_denoise_INDI(ushort(* private(c,i,hpass,lev,lpass,row,col,thold,fimg,temp) #else #pragma omp parallel for \ - default(none) \ shared(nc,image,size) \ private(c,i,hpass,lev,lpass,row,col,thold,fimg,temp) #endif @@ -413,7 +412,6 @@ void CLASS vng_interpolate_INDI(ushort(* progress(PROGRESS_INTERPOLATE, -height); #ifdef _OPENMP #pragma omp parallel \ - default(none) \ shared(image,code,prow,pcol,h) \ private(row,col,g,brow,rowtmp,pix,ip,gval,diff,gmin,gmax,thold,sum,color,num,c,t) #endif @@ -496,7 +494,6 @@ void CLASS ppg_interpolate_INDI(ushort(* #ifdef _OPENMP #pragma omp parallel \ - default(none) \ shared(image,dir,diff) \ private(row,col,i,d,c,pix,guess) #endif
convolution_1x1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack the 1x1 convolution weights into bf16, interleaved for the sgemm
// kernel: output channels are grouped by 8 (aarch64 NEON only), then 4,
// then 1, with the group's weights for each input channel stored
// contiguously so the GEMM inner loop reads them sequentially.
static void conv1x1s1_sgemm_transform_kernel_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const float* kernel = _kernel;

    // interleave
#if __ARM_NEON && __aarch64__
    kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u, 1);
#else
    kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4, (size_t)2u, 1);
#endif // __ARM_NEON && __aarch64__

    int p = 0;
#if __ARM_NEON && __aarch64__
    // groups of 8 output channels
    for (; p + 7 < outch; p += 8)
    {
        const float* kernel0 = kernel + (p + 0) * inch;
        const float* kernel1 = kernel + (p + 1) * inch;
        const float* kernel2 = kernel + (p + 2) * inch;
        const float* kernel3 = kernel + (p + 3) * inch;
        const float* kernel4 = kernel + (p + 4) * inch;
        const float* kernel5 = kernel + (p + 5) * inch;
        const float* kernel6 = kernel + (p + 6) * inch;
        const float* kernel7 = kernel + (p + 7) * inch;

        unsigned short* ktmp = kernel_tm.channel(p / 8);

        for (int q = 0; q < inch; q++)
        {
            // kernel0...7 0
            ktmp[0] = float32_to_bfloat16(kernel0[0]);
            ktmp[1] = float32_to_bfloat16(kernel1[0]);
            ktmp[2] = float32_to_bfloat16(kernel2[0]);
            ktmp[3] = float32_to_bfloat16(kernel3[0]);
            ktmp[4] = float32_to_bfloat16(kernel4[0]);
            ktmp[5] = float32_to_bfloat16(kernel5[0]);
            ktmp[6] = float32_to_bfloat16(kernel6[0]);
            ktmp[7] = float32_to_bfloat16(kernel7[0]);

            ktmp += 8;
            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
            kernel4 += 1;
            kernel5 += 1;
            kernel6 += 1;
            kernel7 += 1;
        }
    }
#endif // __ARM_NEON && __aarch64__
    // groups of 4 output channels
    for (; p + 3 < outch; p += 4)
    {
        const float* kernel0 = kernel + (p + 0) * inch;
        const float* kernel1 = kernel + (p + 1) * inch;
        const float* kernel2 = kernel + (p + 2) * inch;
        const float* kernel3 = kernel + (p + 3) * inch;

#if __ARM_NEON && __aarch64__
        unsigned short* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
        unsigned short* ktmp = kernel_tm.channel(p / 4);
#endif // __ARM_NEON && __aarch64__

        for (int q = 0; q < inch; q++)
        {
            // kernel0...3 0
            ktmp[0] = float32_to_bfloat16(kernel0[0]);
            ktmp[1] = float32_to_bfloat16(kernel1[0]);
            ktmp[2] = float32_to_bfloat16(kernel2[0]);
            ktmp[3] = float32_to_bfloat16(kernel3[0]);

            ktmp += 4;
            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
        }
    }
    // remaining single output channels
    for (; p < outch; p++)
    {
        const float* kernel0 = kernel + p * inch;

#if __ARM_NEON && __aarch64__
        unsigned short* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
        unsigned short* ktmp = kernel_tm.channel(p / 4 + p % 4);
#endif // __ARM_NEON && __aarch64__

        for (int q = 0; q < inch; q++)
        {
            ktmp[0] = float32_to_bfloat16(kernel0[0]);
            ktmp++;
            kernel0++;
        }
    }
}

// bf16 1x1 stride-1 convolution as an im2col-free sgemm: the input is first
// interleaved into 'tmp' in pixel groups of 8/4/1, then multiplied against
// the repacked kernel.  (Body continues past this chunk.)
static void conv1x1s1_sgemm_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    const float* bias = _bias;

    // interleave
    Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 2u, opt.workspace_allocator);
    {
        int nn_size = size >> 3;
        int remain_size_start = nn_size << 3;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 8;

            const unsigned short* img0 = bottom_blob.channel(0);
            img0 += i;

            unsigned short* tmpptr = tmp.channel(i / 8);

            for (int q = 0; q < inch; q++)
            {
#if __ARM_NEON
#if __aarch64__ vst1q_u16(tmpptr, vld1q_u16(img0)); tmpptr += 8; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #128] \n" "vld1.u16 {d0-d1}, [%0 :64] \n" "vst1.u16 {d0-d1}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const unsigned short* img0 = bottom_blob.channel(0); img0 += i; unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { #if __ARM_NEON #if __aarch64__ vst1_u16(tmpptr, vld1_u16(img0)); tmpptr += 4; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #64] \n" "vld1.u16 {d0}, [%0 :64] \n" "vst1.u16 {d0}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); img0 += bottom_blob.cstep; #endif // __aarch64__ #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; #endif // __ARM_NEON } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const unsigned short* img0 = bottom_blob.channel(0); img0 += i; unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p + 1); unsigned short* outptr2 = top_blob.channel(p + 2); unsigned short* outptr3 = top_blob.channel(p + 3); unsigned short* outptr4 = top_blob.channel(p + 4); unsigned short* outptr5 = top_blob.channel(p + 5); unsigned short* outptr6 = top_blob.channel(p + 6); unsigned short* outptr7 = top_blob.channel(p + 7); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); const unsigned short* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n" "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n" "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n" "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n" "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n" "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n" "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n" "dup v31.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%8], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%8], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" 
"fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v26.4s, v10.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4h, v9.4h}, [%8], #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4h, v1.4h}, [%9], #16 \n" "shll v0.4s, 
v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%0], #16 \n" "st1 {v18.4h, v19.4h}, [%1], #16 \n" "st1 {v20.4h, v21.4h}, [%2], #16 \n" "st1 {v22.4h, v23.4h}, [%3], #16 \n" "st1 {v24.4h, v25.4h}, [%4], #16 \n" "st1 {v26.4h, v27.4h}, [%5], #16 \n" "st1 {v28.4h, v29.4h}, [%6], #16 \n" "st1 {v30.4h, v31.4h}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", 
"v30", "v31"); } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const unsigned short* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[1] \n" "dup v18.4s, v0.s[2] \n" "dup v19.4s, v0.s[3] \n" "dup v20.4s, v1.s[0] \n" "dup v21.4s, v1.s[1] \n" "dup v22.4s, v1.s[2] \n" "dup v23.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%8], #32 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, 
v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #64] \n" "ld1 {v8.4h}, [%8], #8 \n" "shll v8.4s, v8.4h, #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4h, v1.4h}, [%9], #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h}, [%0], #8 \n" "st1 {v17.4h}, [%1], #8 \n" "st1 {v18.4h}, [%2], #8 \n" "st1 {v19.4h}, [%3], #8 \n" "st1 {v20.4h}, [%4], #8 \n" "st1 {v21.4h}, [%5], #8 \n" "st1 {v22.4h}, [%6], #8 \n" "st1 {v23.4h}, [%7], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const unsigned short* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v24.4s, 
v25.4s}, [%20] \n" // inch loop "lsr w4, %w21, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%8, #64] \n" "ld1 {v8.4h}, [%8], #8 \n" "shll v8.4s, v8.4h, #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v0.4s, v8.s[0] \n" "fmla v17.4s, v1.4s, v8.s[0] \n" "fmla v18.4s, v2.4s, v8.s[1] \n" "fmla v19.4s, v3.4s, v8.s[1] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%9], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v20.4s, v4.4s, v8.s[2] \n" "fmla v21.4s, v5.4s, v8.s[2] \n" "fmla v22.4s, v6.4s, v8.s[3] \n" "fmla v23.4s, v7.4s, v8.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v24.4s, v24.4s, v16.4s \n" "fadd v25.4s, v25.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w21, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #16] \n" "ld1r {v8.4h}, [%8], #2 \n" "shll v8.4s, v8.4h, #16 \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4h, v1.4h}, [%9], #16 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v0.4s \n" "fmla v25.4s, v8.4s, v1.4s \n" "bne 2b \n" "3: \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "st1 {v24.h}[0],[%0], #2 \n" "st1 {v24.h}[1],[%1], #2 \n" "st1 {v24.h}[2],[%2], #2 \n" "st1 
{v24.h}[3],[%3], #2 \n" "st1 {v25.h}[0],[%4], #2 \n" "st1 {v25.h}[1],[%5], #2 \n" "st1 {v25.h}[2],[%6], #2 \n" "st1 {v25.h}[3],[%7], #2 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"); } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p + 1); unsigned short* outptr2 = top_blob.channel(p + 2); unsigned short* outptr3 = top_blob.channel(p + 3); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const unsigned short* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[0] \n" "dup v10.4s, v0.s[1] \n" "dup v11.4s, v0.s[1] \n" "dup v12.4s, v0.s[2] \n" "dup v13.4s, v0.s[2] \n" "dup v14.4s, v0.s[3] \n" "dup v15.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%4], #32 \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" 
"fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4h, v5.4h}, [%4], #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "shll v0.4s, v0.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%0], #16 \n" "st1 {v10.4h, v11.4h}, [%1], #16 \n" "st1 {v12.4h, v13.4h}, [%2], #16 \n" "st1 {v14.4h, v15.4h}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 
q15, d1[1] \n" // inch loop "lsr r4, %13, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.u16 {d10-d11}, [%4 :64]! \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! 
\n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "vshrn.u32 d24, q12, #16 \n" "vshrn.u32 d25, q13, #16 \n" "vshrn.u32 d28, q14, #16 \n" "vshrn.u32 d29, q15, #16 \n" "vst1.u16 {d16-d17}, [%0 :64]! \n" "vst1.u16 {d20-d21}, [%1 :64]! \n" "vst1.u16 {d24-d25}, [%2 :64]! \n" "vst1.u16 {d28-d29}, [%3 :64]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum0_4 = biasptr[0]; float sum0_5 = biasptr[0]; float sum0_6 = biasptr[0]; float sum0_7 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum1_4 = biasptr[1]; float sum1_5 = biasptr[1]; float sum1_6 = biasptr[1]; float sum1_7 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum2_4 = biasptr[2]; float sum2_5 = biasptr[2]; float sum2_6 = biasptr[2]; float sum2_7 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; float sum3_4 = biasptr[3]; float sum3_5 = biasptr[3]; float sum3_6 = biasptr[3]; float sum3_7 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0_0 += 
bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); sum0_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]); sum0_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]); sum0_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]); sum0_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]); sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]); sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]); sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]); sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]); sum1_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[1]); sum1_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[1]); sum1_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[1]); sum1_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[1]); sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]); sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]); sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]); sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]); sum2_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[2]); sum2_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[2]); sum2_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[2]); sum2_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[2]); sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]); sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]); sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]); sum3_3 += 
bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]); sum3_4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[3]); sum3_5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[3]); sum3_6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[3]); sum3_7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[3]); tmpptr += 8; kptr += 4; } outptr0[0] = float32_to_bfloat16(sum0_0); outptr0[1] = float32_to_bfloat16(sum0_1); outptr0[2] = float32_to_bfloat16(sum0_2); outptr0[3] = float32_to_bfloat16(sum0_3); outptr0[4] = float32_to_bfloat16(sum0_4); outptr0[5] = float32_to_bfloat16(sum0_5); outptr0[6] = float32_to_bfloat16(sum0_6); outptr0[7] = float32_to_bfloat16(sum0_7); outptr1[0] = float32_to_bfloat16(sum1_0); outptr1[1] = float32_to_bfloat16(sum1_1); outptr1[2] = float32_to_bfloat16(sum1_2); outptr1[3] = float32_to_bfloat16(sum1_3); outptr1[4] = float32_to_bfloat16(sum1_4); outptr1[5] = float32_to_bfloat16(sum1_5); outptr1[6] = float32_to_bfloat16(sum1_6); outptr1[7] = float32_to_bfloat16(sum1_7); outptr2[0] = float32_to_bfloat16(sum2_0); outptr2[1] = float32_to_bfloat16(sum2_1); outptr2[2] = float32_to_bfloat16(sum2_2); outptr2[3] = float32_to_bfloat16(sum2_3); outptr2[4] = float32_to_bfloat16(sum2_4); outptr2[5] = float32_to_bfloat16(sum2_5); outptr2[6] = float32_to_bfloat16(sum2_6); outptr2[7] = float32_to_bfloat16(sum2_7); outptr3[0] = float32_to_bfloat16(sum3_0); outptr3[1] = float32_to_bfloat16(sum3_1); outptr3[2] = float32_to_bfloat16(sum3_2); outptr3[3] = float32_to_bfloat16(sum3_3); outptr3[4] = float32_to_bfloat16(sum3_4); outptr3[5] = float32_to_bfloat16(sum3_5); outptr3[6] = float32_to_bfloat16(sum3_6); outptr3[7] = float32_to_bfloat16(sum3_7); outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4); 
#else const unsigned short* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[1] \n" "dup v10.4s, v0.s[2] \n" "dup v11.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v4.4h}, [%4], #8 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "st1 {v8.4h}, [%0], #8 \n" "st1 {v9.4h}, [%1], #8 \n" "st1 {v10.4h}, [%2], #8 \n" "st1 {v11.4h}, [%3], #8 \n" : "=r"(outptr0), // %0 
"=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[1] \n" "vdup.f32 q10, d1[0] \n" "vdup.f32 q11, d1[1] \n" // inch loop "lsr r4, %13, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #256] \n" "vld1.u16 {d12-d15}, [%4 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #64] \n" "vld1.u16 {d9}, [%4 :64]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d18, q9, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d22, q11, #16 \n" "vst1.u16 {d16}, [%0 :64]! \n" "vst1.u16 {d18}, [%1 :64]! 
\n" "vst1.u16 {d20}, [%2 :64]! \n" "vst1.u16 {d22}, [%3 :64]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ #else float sum0_0 = biasptr[0]; float sum0_1 = biasptr[0]; float sum0_2 = biasptr[0]; float sum0_3 = biasptr[0]; float sum1_0 = biasptr[1]; float sum1_1 = biasptr[1]; float sum1_2 = biasptr[1]; float sum1_3 = biasptr[1]; float sum2_0 = biasptr[2]; float sum2_1 = biasptr[2]; float sum2_2 = biasptr[2]; float sum2_3 = biasptr[2]; float sum3_0 = biasptr[3]; float sum3_1 = biasptr[3]; float sum3_2 = biasptr[3]; float sum3_3 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum0_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum0_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum0_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); sum1_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]); sum1_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[1]); sum1_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[1]); sum1_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[1]); sum2_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]); sum2_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[2]); sum2_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[2]); sum2_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[2]); sum3_0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]); sum3_1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[3]); sum3_2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[3]); 
sum3_3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[3]); tmpptr += 4; kptr += 4; } outptr0[0] = float32_to_bfloat16(sum0_0); outptr0[1] = float32_to_bfloat16(sum0_1); outptr0[2] = float32_to_bfloat16(sum0_2); outptr0[3] = float32_to_bfloat16(sum0_3); outptr1[0] = float32_to_bfloat16(sum1_0); outptr1[1] = float32_to_bfloat16(sum1_1); outptr1[2] = float32_to_bfloat16(sum1_2); outptr1[3] = float32_to_bfloat16(sum1_3); outptr2[0] = float32_to_bfloat16(sum2_0); outptr2[1] = float32_to_bfloat16(sum2_1); outptr2[2] = float32_to_bfloat16(sum2_2); outptr2[3] = float32_to_bfloat16(sum2_3); outptr3[0] = float32_to_bfloat16(sum3_0); outptr3[1] = float32_to_bfloat16(sum3_1); outptr3[2] = float32_to_bfloat16(sum3_2); outptr3[3] = float32_to_bfloat16(sum3_3); outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const unsigned short* kptr = kernel.channel(p / 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v12.4s}, [%12] \n" // inch loop "lsr w4, %w13, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v4.4h}, [%4], #8 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v12.4s, v12.4s, v8.4s 
\n" "1: \n" // remain loop "and w4, %w13, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #16] \n" "ld1r {v4.4h}, [%4], #2 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v12.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "shrn v12.4h, v12.4s, #16 \n" "st1 {v12.h}[0], [%0], #2 \n" "st1 {v12.h}[1], [%1], #2 \n" "st1 {v12.h}[2], [%2], #2 \n" "st1 {v12.h}[3], [%3], #2 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"); #else // __aarch64__ asm volatile( "vld1.f32 {d24-d25}, [%12] \n" // inch loop "lsr r4, %13, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n" "pld [%4, #64] \n" "vld1.u16 {d9}, [%4 :64]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%5, #256] \n" "vld1.u16 {d4-d7}, [%5 :64]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %13, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #16] \n" "vld1.u16 {d9[]}, [%4]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%5, #64] \n" "vld1.u16 {d1}, [%5 :64]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, q0 \n" "bne 2b \n" "3: \n" "vshrn.u32 d24, q12, #16 \n" "vst1.u16 {d24[0]}, [%0]! \n" "vst1.u16 {d24[1]}, [%1]! 
\n" "vst1.u16 {d24[2]}, [%2]! \n" "vst1.u16 {d24[3]}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"); #endif // __aarch64__ #else float sum0 = biasptr[0]; float sum1 = biasptr[1]; float sum2 = biasptr[2]; float sum3 = biasptr[3]; for (int q = 0; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum1 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[1]); sum2 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[2]); sum3 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[3]); tmpptr++; kptr += 4; } outptr0[0] = float32_to_bfloat16(sum0); outptr1[0] = float32_to_bfloat16(sum1); outptr2[0] = float32_to_bfloat16(sum2); outptr3[0] = float32_to_bfloat16(sum3); outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; unsigned short* outptr0 = out0; int i = 0; for (; i + 7 < size; i += 8) { const unsigned short* tmpptr = tmp.channel(i / 8); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const unsigned short* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" "dup v9.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "shll v0.4s, v0.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4h, v5.4h}, [%1], #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "prfm pldl1keep, [%2, #16] \n" "ld1r {v0.4h}, [%2], #2 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", 
"v8", "v9", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" "vdup.f32 q9, %6 \n" // inch loop "lsr r4, %7, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #256] \n" "vld1.u16 {d28-d31}, [%1 :64]! \n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #128] \n" "vld1.u16 {d10-d11}, [%1 :64]! \n" "vshll.u16 q4, d10, #16 \n" "vshll.u16 q5, d11, #16 \n" "pld [%2, #16] \n" "vld1.u16 {d1[]}, [%2]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vst1.u16 {d16-d17}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; float sum4 = bias0; float sum5 = bias0; float sum6 = bias0; float sum7 = bias0; for (int q = 0; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); sum4 += bfloat16_to_float32(tmpptr[4]) * bfloat16_to_float32(kptr[0]); sum5 += bfloat16_to_float32(tmpptr[5]) * bfloat16_to_float32(kptr[0]); sum6 += bfloat16_to_float32(tmpptr[6]) * bfloat16_to_float32(kptr[0]); sum7 += bfloat16_to_float32(tmpptr[7]) * bfloat16_to_float32(kptr[0]); tmpptr += 8; kptr++; } outptr0[0] = float32_to_bfloat16(sum0); outptr0[1] = float32_to_bfloat16(sum1); outptr0[2] = float32_to_bfloat16(sum2); outptr0[3] = float32_to_bfloat16(sum3); outptr0[4] = float32_to_bfloat16(sum4); outptr0[5] = float32_to_bfloat16(sum5); outptr0[6] = float32_to_bfloat16(sum6); outptr0[7] = float32_to_bfloat16(sum7); outptr0 += 8; #endif // __ARM_NEON } for (; i + 3 < size; i += 4) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const unsigned short* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "dup v8.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%1], #32 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" 
"shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n" // w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v4.4h}, [%1], #8 \n" "shll v4.4s, v4.4h, #16 \n" "prfm pldl1keep, [%2, #16] \n" "ld1r {v0.4h}, [%2], #2 \n" "shll v0.4s, v0.4h, #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "shrn v8.4h, v8.4s, #16 \n" "st1 {v8.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"); #else // __aarch64__ asm volatile( "vdup.f32 q8, %6 \n" // inch loop "lsr r4, %7, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #256] \n" "vld1.u16 {d12-d15}, [%1 :64]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n" // r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #64] \n" "vld1.u16 {d9}, [%1 :64]! \n" "vshll.u16 q4, d9, #16 \n" "pld [%2, #16] \n" "vld1.u16 {d1[]}, [%2]! \n" "vshll.u16 q0, d1, #16 \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"); #endif // __aarch64__ #else float sum0 = bias0; float sum1 = bias0; float sum2 = bias0; float sum3 = bias0; for (int q = 0; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); sum1 += bfloat16_to_float32(tmpptr[1]) * bfloat16_to_float32(kptr[0]); sum2 += bfloat16_to_float32(tmpptr[2]) * bfloat16_to_float32(kptr[0]); sum3 += bfloat16_to_float32(tmpptr[3]) * bfloat16_to_float32(kptr[0]); tmpptr += 4; kptr++; } outptr0[0] = float32_to_bfloat16(sum0); outptr0[1] = float32_to_bfloat16(sum1); outptr0[2] = float32_to_bfloat16(sum2); outptr0[3] = float32_to_bfloat16(sum3); outptr0 += 4; #endif // __ARM_NEON } for (; i < size; i++) { const unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARM_NEON && __aarch64__ const unsigned short* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const unsigned short* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARM_NEON && __aarch64__ int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q + 3 < inch; q += 4) { float32x4_t _p0 = vcvt_f32_bf16(vld1_u16(tmpptr)); tmpptr += 4; float32x4_t _k0 = vcvt_f32_bf16(vld1_u16(kptr)); kptr += 4; #if __aarch64__ _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __aarch64__ float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif #else float sum0 = bias0; #endif // __ARM_NEON for (; q < inch; q++) { sum0 += bfloat16_to_float32(tmpptr[0]) * bfloat16_to_float32(kptr[0]); tmpptr++; kptr++; } outptr0[0] = float32_to_bfloat16(sum0); outptr0++; } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const float bias0 = bias ? 
bias[p] : 0.f; // // float* outptr0 = out0; // // for (int i=0; i<size; i++) // { // float sum = bias0; // // const float* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const float* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } }
GB_unop__identity_uint16_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint16_fc32
// op(A') function:  GB_unop_tran__identity_uint16_fc32

// C type:   uint16_t
// A type:   GxB_FC32_t
// cast:     uint16_t cij = GB_cast_to_uint16_t ((double) crealf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: take the real part of the complex entry and round it to uint16_t
#define GB_CAST(z, aij) \
    uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: FC32 -> uint16_t always requires a typecast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each of the anz entries of Ax into Cx in parallel.  Returns
// GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE), so the caller
// can fall back to the generic implementation.
GrB_Info GB_unop_apply__identity_uint16_fc32
(
    uint16_t *Cx,                   // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with identical types: a flat memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which expands the
// GB_* macros defined above for this type combination.
GrB_Info GB_unop_tran__identity_uint16_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
kthread_test.c
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <pthread.h>

#if HAVE_CILK
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#endif

// Shared job description: a Mandelbrot-style viewport plus the output buffer
// of per-pixel iteration counts (k[i] < 0 means "not yet processed").
typedef struct {
	int max_iter, w, h;
	double xmin, xmax, ymin, ymax;
	int *k;
} global_t;

// Worker for one job item: map item index `i` to a pixel, run the escape-time
// iteration z -> z^2 + c, and store the iteration count at which it escaped.
static void compute(void *ctx, int i, int tid)
{
	global_t *g = (global_t*)ctx;
	const double cx = g->xmin + (g->xmax - g->xmin) * (i % g->w) / g->w;
	const double cy = g->ymin + (g->ymax - g->ymin) * (i / g->w) / g->h;
	double re = cx, im = cy;
	int iter = 0;

	assert(g->k[i] < 0); // each item must be handed out exactly once

	while (iter < g->max_iter) {
		double cross = re * im; // Re*Im, used for the new imaginary part
		re *= re;
		im *= im;
		if (re + im >= 4) break; // |z|^2 >= 4 -> escaped
		re = re - im + cx;
		im = cross + cross + cy;
		++iter;
	}
	g->k[i] = iter;
}

// Thread-pool parallel-for, implemented elsewhere (kthread.c).
void kt_for(int n_threads, int n_items, void (*func)(void*,int,int), void *data);

int main(int argc, char *argv[])
{
	int mode = 0, n_threads = 2; // mode 0: kt_for, 1: none, 2: OpenMP, 3: Cilk
	global_t job = { 10240*100, 800, 600, -2., -1.2, -1.2, 1.2, 0 };

	if (argc > 1) {
		char sel = argv[1][0];
		if (sel == 'o') mode = 2;
		else if (sel == 'c') mode = 3;
		else if (sel == 'n') mode = 1;
		// a leading digit selects the kt_for backend with that thread count
		if (sel >= '0' && sel <= '9') n_threads = atoi(argv[1]);
	} else {
		fprintf(stderr, "Usage: ./a.out [openmp | cilk | #threads]\n");
	}

	const int n_pixels = job.w * job.h;
	job.k = calloc(n_pixels, sizeof(int));
	for (int i = 0; i < n_pixels; ++i) job.k[i] = -1;

	if (mode == 0) {
		kt_for(n_threads, n_pixels, compute, &job);
	} else if (mode == 2) {
#pragma omp parallel for
		for (int i = 0; i < n_pixels; ++i) compute(&job, i, 0);
	} else if (mode == 3) {
#if HAVE_CILK
		cilk_for (int i = 0; i < n_pixels; ++i) compute(&job, i, 0);
#endif
	}

	// verify every pixel was visited exactly once
	int unprocessed = 0;
	for (int i = 0; i < n_pixels; ++i)
		unprocessed += (job.k[i] < 0);
	free(job.k);
	assert(unprocessed == 0);
	return 0;
}
convolution_7x7_pack1to4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv7x7s2_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); v4f32 _bias0 = bias ? 
(v4f32)__msa_ld_w(bias + p * 4, 0) : (v4f32)__msa_fill_w(0); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* r3 = img0.row(3); const float* r4 = img0.row(4); const float* r5 = img0.row(5); const float* r6 = img0.row(6); const float* kptr = kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0); v4f32 _sum1 = (v4f32)__msa_ld_w(outptr0 + 4, 0); v4f32 _sum2 = (v4f32)__msa_ld_w(outptr0 + 4 * 2, 0); v4f32 _sum3 = (v4f32)__msa_ld_w(outptr0 + 4 * 3, 0); v4f32 _k00 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k01 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k02 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k03 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k04 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k05 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k06 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r0 = __msa_ld_w(r0, 0); v4i32 _r0n = __msa_ld_w(r0 + 4, 0); v4i32 _r0nn = __msa_ld_w(r0 + 8, 0); v4f32 _r00 = (v4f32)__msa_splati_w(_r0, 0); v4f32 _r01 = (v4f32)__msa_splati_w(_r0, 1); v4f32 _r02 = (v4f32)__msa_splati_w(_r0, 2); v4f32 _r03 = (v4f32)__msa_splati_w(_r0, 3); v4f32 _r04 = (v4f32)__msa_splati_w(_r0n, 0); v4f32 _r05 = (v4f32)__msa_splati_w(_r0n, 1); v4f32 _r06 = (v4f32)__msa_splati_w(_r0n, 2); v4f32 _r07 = (v4f32)__msa_splati_w(_r0n, 3); v4f32 _r08 = (v4f32)__msa_splati_w(_r0nn, 0); v4f32 _r09 = (v4f32)__msa_splati_w(_r0nn, 1); v4f32 _r0a = (v4f32)__msa_splati_w(_r0nn, 2); v4f32 _r0b = (v4f32)__msa_splati_w(_r0nn, 3); v4f32 _r0c = __msa_fill_w_f32(r0[12]); _sum0 = __msa_fmadd_w(_sum0, _r00, _k00); _sum1 = __msa_fmadd_w(_sum1, _r02, _k00); _sum2 = __msa_fmadd_w(_sum2, _r04, _k00); _sum3 = __msa_fmadd_w(_sum3, _r06, _k00); _sum0 = __msa_fmadd_w(_sum0, _r01, _k01); _sum1 = __msa_fmadd_w(_sum1, _r03, _k01); 
_sum2 = __msa_fmadd_w(_sum2, _r05, _k01); _sum3 = __msa_fmadd_w(_sum3, _r07, _k01); _sum0 = __msa_fmadd_w(_sum0, _r02, _k02); _sum1 = __msa_fmadd_w(_sum1, _r04, _k02); _sum2 = __msa_fmadd_w(_sum2, _r06, _k02); _sum3 = __msa_fmadd_w(_sum3, _r08, _k02); _sum0 = __msa_fmadd_w(_sum0, _r03, _k03); _sum1 = __msa_fmadd_w(_sum1, _r05, _k03); _sum2 = __msa_fmadd_w(_sum2, _r07, _k03); _sum3 = __msa_fmadd_w(_sum3, _r09, _k03); _sum0 = __msa_fmadd_w(_sum0, _r04, _k04); _sum1 = __msa_fmadd_w(_sum1, _r06, _k04); _sum2 = __msa_fmadd_w(_sum2, _r08, _k04); _sum3 = __msa_fmadd_w(_sum3, _r0a, _k04); _sum0 = __msa_fmadd_w(_sum0, _r05, _k05); _sum1 = __msa_fmadd_w(_sum1, _r07, _k05); _sum2 = __msa_fmadd_w(_sum2, _r09, _k05); _sum3 = __msa_fmadd_w(_sum3, _r0b, _k05); _sum0 = __msa_fmadd_w(_sum0, _r06, _k06); _sum1 = __msa_fmadd_w(_sum1, _r08, _k06); _sum2 = __msa_fmadd_w(_sum2, _r0a, _k06); _sum3 = __msa_fmadd_w(_sum3, _r0c, _k06); v4f32 _k10 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k11 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k12 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k13 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k14 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k15 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k16 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r1 = __msa_ld_w(r1, 0); v4i32 _r1n = __msa_ld_w(r1 + 4, 0); v4i32 _r1nn = __msa_ld_w(r1 + 8, 0); v4f32 _r10 = (v4f32)__msa_splati_w(_r1, 0); v4f32 _r11 = (v4f32)__msa_splati_w(_r1, 1); v4f32 _r12 = (v4f32)__msa_splati_w(_r1, 2); v4f32 _r13 = (v4f32)__msa_splati_w(_r1, 3); v4f32 _r14 = (v4f32)__msa_splati_w(_r1n, 0); v4f32 _r15 = (v4f32)__msa_splati_w(_r1n, 1); v4f32 _r16 = (v4f32)__msa_splati_w(_r1n, 2); v4f32 _r17 = (v4f32)__msa_splati_w(_r1n, 3); v4f32 _r18 = (v4f32)__msa_splati_w(_r1nn, 0); v4f32 _r19 = (v4f32)__msa_splati_w(_r1nn, 1); v4f32 _r1a = (v4f32)__msa_splati_w(_r1nn, 2); v4f32 _r1b = (v4f32)__msa_splati_w(_r1nn, 3); v4f32 _r1c = __msa_fill_w_f32(r1[12]); _sum0 = __msa_fmadd_w(_sum0, _r10, _k10); 
_sum1 = __msa_fmadd_w(_sum1, _r12, _k10); _sum2 = __msa_fmadd_w(_sum2, _r14, _k10); _sum3 = __msa_fmadd_w(_sum3, _r16, _k10); _sum0 = __msa_fmadd_w(_sum0, _r11, _k11); _sum1 = __msa_fmadd_w(_sum1, _r13, _k11); _sum2 = __msa_fmadd_w(_sum2, _r15, _k11); _sum3 = __msa_fmadd_w(_sum3, _r17, _k11); _sum0 = __msa_fmadd_w(_sum0, _r12, _k12); _sum1 = __msa_fmadd_w(_sum1, _r14, _k12); _sum2 = __msa_fmadd_w(_sum2, _r16, _k12); _sum3 = __msa_fmadd_w(_sum3, _r18, _k12); _sum0 = __msa_fmadd_w(_sum0, _r13, _k13); _sum1 = __msa_fmadd_w(_sum1, _r15, _k13); _sum2 = __msa_fmadd_w(_sum2, _r17, _k13); _sum3 = __msa_fmadd_w(_sum3, _r19, _k13); _sum0 = __msa_fmadd_w(_sum0, _r14, _k14); _sum1 = __msa_fmadd_w(_sum1, _r16, _k14); _sum2 = __msa_fmadd_w(_sum2, _r18, _k14); _sum3 = __msa_fmadd_w(_sum3, _r1a, _k14); _sum0 = __msa_fmadd_w(_sum0, _r15, _k15); _sum1 = __msa_fmadd_w(_sum1, _r17, _k15); _sum2 = __msa_fmadd_w(_sum2, _r19, _k15); _sum3 = __msa_fmadd_w(_sum3, _r1b, _k15); _sum0 = __msa_fmadd_w(_sum0, _r16, _k16); _sum1 = __msa_fmadd_w(_sum1, _r18, _k16); _sum2 = __msa_fmadd_w(_sum2, _r1a, _k16); _sum3 = __msa_fmadd_w(_sum3, _r1c, _k16); v4f32 _k20 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k21 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k22 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k23 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k24 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k25 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k26 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r2 = __msa_ld_w(r2, 0); v4i32 _r2n = __msa_ld_w(r2 + 4, 0); v4i32 _r2nn = __msa_ld_w(r2 + 8, 0); v4f32 _r20 = (v4f32)__msa_splati_w(_r2, 0); v4f32 _r21 = (v4f32)__msa_splati_w(_r2, 1); v4f32 _r22 = (v4f32)__msa_splati_w(_r2, 2); v4f32 _r23 = (v4f32)__msa_splati_w(_r2, 3); v4f32 _r24 = (v4f32)__msa_splati_w(_r2n, 0); v4f32 _r25 = (v4f32)__msa_splati_w(_r2n, 1); v4f32 _r26 = (v4f32)__msa_splati_w(_r2n, 2); v4f32 _r27 = (v4f32)__msa_splati_w(_r2n, 3); v4f32 _r28 = (v4f32)__msa_splati_w(_r2nn, 0); v4f32 _r29 = 
(v4f32)__msa_splati_w(_r2nn, 1); v4f32 _r2a = (v4f32)__msa_splati_w(_r2nn, 2); v4f32 _r2b = (v4f32)__msa_splati_w(_r2nn, 3); v4f32 _r2c = __msa_fill_w_f32(r2[12]); _sum0 = __msa_fmadd_w(_sum0, _r20, _k20); _sum1 = __msa_fmadd_w(_sum1, _r22, _k20); _sum2 = __msa_fmadd_w(_sum2, _r24, _k20); _sum3 = __msa_fmadd_w(_sum3, _r26, _k20); _sum0 = __msa_fmadd_w(_sum0, _r21, _k21); _sum1 = __msa_fmadd_w(_sum1, _r23, _k21); _sum2 = __msa_fmadd_w(_sum2, _r25, _k21); _sum3 = __msa_fmadd_w(_sum3, _r27, _k21); _sum0 = __msa_fmadd_w(_sum0, _r22, _k22); _sum1 = __msa_fmadd_w(_sum1, _r24, _k22); _sum2 = __msa_fmadd_w(_sum2, _r26, _k22); _sum3 = __msa_fmadd_w(_sum3, _r28, _k22); _sum0 = __msa_fmadd_w(_sum0, _r23, _k23); _sum1 = __msa_fmadd_w(_sum1, _r25, _k23); _sum2 = __msa_fmadd_w(_sum2, _r27, _k23); _sum3 = __msa_fmadd_w(_sum3, _r29, _k23); _sum0 = __msa_fmadd_w(_sum0, _r24, _k24); _sum1 = __msa_fmadd_w(_sum1, _r26, _k24); _sum2 = __msa_fmadd_w(_sum2, _r28, _k24); _sum3 = __msa_fmadd_w(_sum3, _r2a, _k24); _sum0 = __msa_fmadd_w(_sum0, _r25, _k25); _sum1 = __msa_fmadd_w(_sum1, _r27, _k25); _sum2 = __msa_fmadd_w(_sum2, _r29, _k25); _sum3 = __msa_fmadd_w(_sum3, _r2b, _k25); _sum0 = __msa_fmadd_w(_sum0, _r26, _k26); _sum1 = __msa_fmadd_w(_sum1, _r28, _k26); _sum2 = __msa_fmadd_w(_sum2, _r2a, _k26); _sum3 = __msa_fmadd_w(_sum3, _r2c, _k26); v4f32 _k30 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k31 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k32 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k33 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k34 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k35 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k36 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r3 = __msa_ld_w(r3, 0); v4i32 _r3n = __msa_ld_w(r3 + 4, 0); v4i32 _r3nn = __msa_ld_w(r3 + 8, 0); v4f32 _r30 = (v4f32)__msa_splati_w(_r3, 0); v4f32 _r31 = (v4f32)__msa_splati_w(_r3, 1); v4f32 _r32 = (v4f32)__msa_splati_w(_r3, 2); v4f32 _r33 = (v4f32)__msa_splati_w(_r3, 3); v4f32 _r34 = 
(v4f32)__msa_splati_w(_r3n, 0); v4f32 _r35 = (v4f32)__msa_splati_w(_r3n, 1); v4f32 _r36 = (v4f32)__msa_splati_w(_r3n, 2); v4f32 _r37 = (v4f32)__msa_splati_w(_r3n, 3); v4f32 _r38 = (v4f32)__msa_splati_w(_r3nn, 0); v4f32 _r39 = (v4f32)__msa_splati_w(_r3nn, 1); v4f32 _r3a = (v4f32)__msa_splati_w(_r3nn, 2); v4f32 _r3b = (v4f32)__msa_splati_w(_r3nn, 3); v4f32 _r3c = __msa_fill_w_f32(r3[12]); _sum0 = __msa_fmadd_w(_sum0, _r30, _k30); _sum1 = __msa_fmadd_w(_sum1, _r32, _k30); _sum2 = __msa_fmadd_w(_sum2, _r34, _k30); _sum3 = __msa_fmadd_w(_sum3, _r36, _k30); _sum0 = __msa_fmadd_w(_sum0, _r31, _k31); _sum1 = __msa_fmadd_w(_sum1, _r33, _k31); _sum2 = __msa_fmadd_w(_sum2, _r35, _k31); _sum3 = __msa_fmadd_w(_sum3, _r37, _k31); _sum0 = __msa_fmadd_w(_sum0, _r32, _k32); _sum1 = __msa_fmadd_w(_sum1, _r34, _k32); _sum2 = __msa_fmadd_w(_sum2, _r36, _k32); _sum3 = __msa_fmadd_w(_sum3, _r38, _k32); _sum0 = __msa_fmadd_w(_sum0, _r33, _k33); _sum1 = __msa_fmadd_w(_sum1, _r35, _k33); _sum2 = __msa_fmadd_w(_sum2, _r37, _k33); _sum3 = __msa_fmadd_w(_sum3, _r39, _k33); _sum0 = __msa_fmadd_w(_sum0, _r34, _k34); _sum1 = __msa_fmadd_w(_sum1, _r36, _k34); _sum2 = __msa_fmadd_w(_sum2, _r38, _k34); _sum3 = __msa_fmadd_w(_sum3, _r3a, _k34); _sum0 = __msa_fmadd_w(_sum0, _r35, _k35); _sum1 = __msa_fmadd_w(_sum1, _r37, _k35); _sum2 = __msa_fmadd_w(_sum2, _r39, _k35); _sum3 = __msa_fmadd_w(_sum3, _r3b, _k35); _sum0 = __msa_fmadd_w(_sum0, _r36, _k36); _sum1 = __msa_fmadd_w(_sum1, _r38, _k36); _sum2 = __msa_fmadd_w(_sum2, _r3a, _k36); _sum3 = __msa_fmadd_w(_sum3, _r3c, _k36); v4f32 _k40 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k41 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k42 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k43 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k44 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k45 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k46 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r4 = __msa_ld_w(r4, 0); v4i32 _r4n = __msa_ld_w(r4 + 4, 0); v4i32 _r4nn = 
__msa_ld_w(r4 + 8, 0); v4f32 _r40 = (v4f32)__msa_splati_w(_r4, 0); v4f32 _r41 = (v4f32)__msa_splati_w(_r4, 1); v4f32 _r42 = (v4f32)__msa_splati_w(_r4, 2); v4f32 _r43 = (v4f32)__msa_splati_w(_r4, 3); v4f32 _r44 = (v4f32)__msa_splati_w(_r4n, 0); v4f32 _r45 = (v4f32)__msa_splati_w(_r4n, 1); v4f32 _r46 = (v4f32)__msa_splati_w(_r4n, 2); v4f32 _r47 = (v4f32)__msa_splati_w(_r4n, 3); v4f32 _r48 = (v4f32)__msa_splati_w(_r4nn, 0); v4f32 _r49 = (v4f32)__msa_splati_w(_r4nn, 1); v4f32 _r4a = (v4f32)__msa_splati_w(_r4nn, 2); v4f32 _r4b = (v4f32)__msa_splati_w(_r4nn, 3); v4f32 _r4c = __msa_fill_w_f32(r4[12]); _sum0 = __msa_fmadd_w(_sum0, _r40, _k40); _sum1 = __msa_fmadd_w(_sum1, _r42, _k40); _sum2 = __msa_fmadd_w(_sum2, _r44, _k40); _sum3 = __msa_fmadd_w(_sum3, _r46, _k40); _sum0 = __msa_fmadd_w(_sum0, _r41, _k41); _sum1 = __msa_fmadd_w(_sum1, _r43, _k41); _sum2 = __msa_fmadd_w(_sum2, _r45, _k41); _sum3 = __msa_fmadd_w(_sum3, _r47, _k41); _sum0 = __msa_fmadd_w(_sum0, _r42, _k42); _sum1 = __msa_fmadd_w(_sum1, _r44, _k42); _sum2 = __msa_fmadd_w(_sum2, _r46, _k42); _sum3 = __msa_fmadd_w(_sum3, _r48, _k42); _sum0 = __msa_fmadd_w(_sum0, _r43, _k43); _sum1 = __msa_fmadd_w(_sum1, _r45, _k43); _sum2 = __msa_fmadd_w(_sum2, _r47, _k43); _sum3 = __msa_fmadd_w(_sum3, _r49, _k43); _sum0 = __msa_fmadd_w(_sum0, _r44, _k44); _sum1 = __msa_fmadd_w(_sum1, _r46, _k44); _sum2 = __msa_fmadd_w(_sum2, _r48, _k44); _sum3 = __msa_fmadd_w(_sum3, _r4a, _k44); _sum0 = __msa_fmadd_w(_sum0, _r45, _k45); _sum1 = __msa_fmadd_w(_sum1, _r47, _k45); _sum2 = __msa_fmadd_w(_sum2, _r49, _k45); _sum3 = __msa_fmadd_w(_sum3, _r4b, _k45); _sum0 = __msa_fmadd_w(_sum0, _r46, _k46); _sum1 = __msa_fmadd_w(_sum1, _r48, _k46); _sum2 = __msa_fmadd_w(_sum2, _r4a, _k46); _sum3 = __msa_fmadd_w(_sum3, _r4c, _k46); v4f32 _k50 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k51 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k52 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k53 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k54 = (v4f32)__msa_ld_w(kptr + 
4 * 4, 0); v4f32 _k55 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k56 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r5 = __msa_ld_w(r5, 0); v4i32 _r5n = __msa_ld_w(r5 + 4, 0); v4i32 _r5nn = __msa_ld_w(r5 + 8, 0); v4f32 _r50 = (v4f32)__msa_splati_w(_r5, 0); v4f32 _r51 = (v4f32)__msa_splati_w(_r5, 1); v4f32 _r52 = (v4f32)__msa_splati_w(_r5, 2); v4f32 _r53 = (v4f32)__msa_splati_w(_r5, 3); v4f32 _r54 = (v4f32)__msa_splati_w(_r5n, 0); v4f32 _r55 = (v4f32)__msa_splati_w(_r5n, 1); v4f32 _r56 = (v4f32)__msa_splati_w(_r5n, 2); v4f32 _r57 = (v4f32)__msa_splati_w(_r5n, 3); v4f32 _r58 = (v4f32)__msa_splati_w(_r5nn, 0); v4f32 _r59 = (v4f32)__msa_splati_w(_r5nn, 1); v4f32 _r5a = (v4f32)__msa_splati_w(_r5nn, 2); v4f32 _r5b = (v4f32)__msa_splati_w(_r5nn, 3); v4f32 _r5c = __msa_fill_w_f32(r5[12]); _sum0 = __msa_fmadd_w(_sum0, _r50, _k50); _sum1 = __msa_fmadd_w(_sum1, _r52, _k50); _sum2 = __msa_fmadd_w(_sum2, _r54, _k50); _sum3 = __msa_fmadd_w(_sum3, _r56, _k50); _sum0 = __msa_fmadd_w(_sum0, _r51, _k51); _sum1 = __msa_fmadd_w(_sum1, _r53, _k51); _sum2 = __msa_fmadd_w(_sum2, _r55, _k51); _sum3 = __msa_fmadd_w(_sum3, _r57, _k51); _sum0 = __msa_fmadd_w(_sum0, _r52, _k52); _sum1 = __msa_fmadd_w(_sum1, _r54, _k52); _sum2 = __msa_fmadd_w(_sum2, _r56, _k52); _sum3 = __msa_fmadd_w(_sum3, _r58, _k52); _sum0 = __msa_fmadd_w(_sum0, _r53, _k53); _sum1 = __msa_fmadd_w(_sum1, _r55, _k53); _sum2 = __msa_fmadd_w(_sum2, _r57, _k53); _sum3 = __msa_fmadd_w(_sum3, _r59, _k53); _sum0 = __msa_fmadd_w(_sum0, _r54, _k54); _sum1 = __msa_fmadd_w(_sum1, _r56, _k54); _sum2 = __msa_fmadd_w(_sum2, _r58, _k54); _sum3 = __msa_fmadd_w(_sum3, _r5a, _k54); _sum0 = __msa_fmadd_w(_sum0, _r55, _k55); _sum1 = __msa_fmadd_w(_sum1, _r57, _k55); _sum2 = __msa_fmadd_w(_sum2, _r59, _k55); _sum3 = __msa_fmadd_w(_sum3, _r5b, _k55); _sum0 = __msa_fmadd_w(_sum0, _r56, _k56); _sum1 = __msa_fmadd_w(_sum1, _r58, _k56); _sum2 = __msa_fmadd_w(_sum2, _r5a, _k56); _sum3 = __msa_fmadd_w(_sum3, _r5c, _k56); v4f32 _k60 = 
(v4f32)__msa_ld_w(kptr, 0); v4f32 _k61 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k62 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k63 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k64 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k65 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k66 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr -= 4 * 42; v4i32 _r6 = __msa_ld_w(r6, 0); v4i32 _r6n = __msa_ld_w(r6 + 4, 0); v4i32 _r6nn = __msa_ld_w(r6 + 8, 0); v4f32 _r60 = (v4f32)__msa_splati_w(_r6, 0); v4f32 _r61 = (v4f32)__msa_splati_w(_r6, 1); v4f32 _r62 = (v4f32)__msa_splati_w(_r6, 2); v4f32 _r63 = (v4f32)__msa_splati_w(_r6, 3); v4f32 _r64 = (v4f32)__msa_splati_w(_r6n, 0); v4f32 _r65 = (v4f32)__msa_splati_w(_r6n, 1); v4f32 _r66 = (v4f32)__msa_splati_w(_r6n, 2); v4f32 _r67 = (v4f32)__msa_splati_w(_r6n, 3); v4f32 _r68 = (v4f32)__msa_splati_w(_r6nn, 0); v4f32 _r69 = (v4f32)__msa_splati_w(_r6nn, 1); v4f32 _r6a = (v4f32)__msa_splati_w(_r6nn, 2); v4f32 _r6b = (v4f32)__msa_splati_w(_r6nn, 3); v4f32 _r6c = __msa_fill_w_f32(r6[12]); _sum0 = __msa_fmadd_w(_sum0, _r60, _k60); _sum1 = __msa_fmadd_w(_sum1, _r62, _k60); _sum2 = __msa_fmadd_w(_sum2, _r64, _k60); _sum3 = __msa_fmadd_w(_sum3, _r66, _k60); _sum0 = __msa_fmadd_w(_sum0, _r61, _k61); _sum1 = __msa_fmadd_w(_sum1, _r63, _k61); _sum2 = __msa_fmadd_w(_sum2, _r65, _k61); _sum3 = __msa_fmadd_w(_sum3, _r67, _k61); _sum0 = __msa_fmadd_w(_sum0, _r62, _k62); _sum1 = __msa_fmadd_w(_sum1, _r64, _k62); _sum2 = __msa_fmadd_w(_sum2, _r66, _k62); _sum3 = __msa_fmadd_w(_sum3, _r68, _k62); _sum0 = __msa_fmadd_w(_sum0, _r63, _k63); _sum1 = __msa_fmadd_w(_sum1, _r65, _k63); _sum2 = __msa_fmadd_w(_sum2, _r67, _k63); _sum3 = __msa_fmadd_w(_sum3, _r69, _k63); _sum0 = __msa_fmadd_w(_sum0, _r64, _k64); _sum1 = __msa_fmadd_w(_sum1, _r66, _k64); _sum2 = __msa_fmadd_w(_sum2, _r68, _k64); _sum3 = __msa_fmadd_w(_sum3, _r6a, _k64); _sum0 = __msa_fmadd_w(_sum0, _r65, _k65); _sum1 = __msa_fmadd_w(_sum1, _r67, _k65); _sum2 = __msa_fmadd_w(_sum2, _r69, _k65); _sum3 = 
__msa_fmadd_w(_sum3, _r6b, _k65); _sum0 = __msa_fmadd_w(_sum0, _r66, _k66); _sum1 = __msa_fmadd_w(_sum1, _r68, _k66); _sum2 = __msa_fmadd_w(_sum2, _r6a, _k66); _sum3 = __msa_fmadd_w(_sum3, _r6c, _k66); __msa_st_w((v4i32)_sum0, outptr0, 0); __msa_st_w((v4i32)_sum1, outptr0 + 4, 0); __msa_st_w((v4i32)_sum2, outptr0 + 4 * 2, 0); __msa_st_w((v4i32)_sum3, outptr0 + 4 * 3, 0); outptr0 += 4 * 4; r0 += 8; r1 += 8; r2 += 8; r3 += 8; r4 += 8; r5 += 8; r6 += 8; } for (; j < outw; j++) { v4f32 _sum0 = (v4f32)__msa_ld_w(outptr0, 0); v4f32 _k00 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k01 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k02 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k03 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k04 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k05 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k06 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r0 = __msa_ld_w(r0, 0); v4i32 _r0n = __msa_ld_w(r0 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0, 0), _k00); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0, 1), _k01); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0, 2), _k02); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0, 3), _k03); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0n, 0), _k04); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0n, 1), _k05); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r0n, 2), _k06); v4f32 _k10 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k11 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k12 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k13 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k14 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k15 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k16 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r1 = __msa_ld_w(r1, 0); v4i32 _r1n = __msa_ld_w(r1 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r1, 0), _k10); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r1, 1), _k11); _sum0 = __msa_fmadd_w(_sum0, 
(v4f32)__msa_splati_w(_r1, 2), _k12); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r1, 3), _k13); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r1n, 0), _k14); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r1n, 1), _k15); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r1n, 2), _k16); v4f32 _k20 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k21 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k22 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k23 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k24 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k25 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k26 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r2 = __msa_ld_w(r2, 0); v4i32 _r2n = __msa_ld_w(r2 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2, 0), _k20); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2, 1), _k21); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2, 2), _k22); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2, 3), _k23); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2n, 0), _k24); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2n, 1), _k25); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r2n, 2), _k26); v4f32 _k30 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k31 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k32 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k33 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k34 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k35 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k36 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r3 = __msa_ld_w(r3, 0); v4i32 _r3n = __msa_ld_w(r3 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3, 0), _k30); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3, 1), _k31); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3, 2), _k32); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3, 3), _k33); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3n, 0), _k34); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3n, 
1), _k35); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r3n, 2), _k36); v4f32 _k40 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k41 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k42 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k43 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k44 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k45 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k46 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r4 = __msa_ld_w(r4, 0); v4i32 _r4n = __msa_ld_w(r4 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4, 0), _k40); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4, 1), _k41); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4, 2), _k42); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4, 3), _k43); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4n, 0), _k44); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4n, 1), _k45); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r4n, 2), _k46); v4f32 _k50 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k51 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k52 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k53 = (v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k54 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k55 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k56 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr += 4 * 7; v4i32 _r5 = __msa_ld_w(r5, 0); v4i32 _r5n = __msa_ld_w(r5 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5, 0), _k50); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5, 1), _k51); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5, 2), _k52); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5, 3), _k53); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5n, 0), _k54); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5n, 1), _k55); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r5n, 2), _k56); v4f32 _k60 = (v4f32)__msa_ld_w(kptr, 0); v4f32 _k61 = (v4f32)__msa_ld_w(kptr + 4, 0); v4f32 _k62 = (v4f32)__msa_ld_w(kptr + 4 * 2, 0); v4f32 _k63 = 
(v4f32)__msa_ld_w(kptr + 4 * 3, 0); v4f32 _k64 = (v4f32)__msa_ld_w(kptr + 4 * 4, 0); v4f32 _k65 = (v4f32)__msa_ld_w(kptr + 4 * 5, 0); v4f32 _k66 = (v4f32)__msa_ld_w(kptr + 4 * 6, 0); kptr -= 4 * 42; v4i32 _r6 = __msa_ld_w(r6, 0); v4i32 _r6n = __msa_ld_w(r6 + 4, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6, 0), _k60); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6, 1), _k61); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6, 2), _k62); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6, 3), _k63); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6n, 0), _k64); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6n, 1), _k65); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_r6n, 2), _k66); __msa_st_w((v4i32)_sum0, outptr0, 0); outptr0 += 4; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; r5 += 2; r6 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; r5 += tailstep; r6 += tailstep; } } } }
omp_task_final.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" int test_omp_task_final() { int tids[NUM_TASKS]; int includedtids[NUM_TASKS]; int i; int error = 0; #pragma omp parallel { #pragma omp single { for (i = 0; i < NUM_TASKS; i++) { /* First we have to store the value of the loop index in a new variable * which will be private for each task because otherwise it will be overwritten * if the execution of the task takes longer than the time which is needed to * enter the next step of the loop! */ int myi; myi = i; #pragma omp task final(i>=10) { tids[myi] = omp_get_thread_num(); /* we generate included tasks for final tasks */ if(myi >= 10) { int included = myi; #pragma omp task { my_sleep (SLEEPTIME); includedtids[included] = omp_get_thread_num(); } /* end of omp included task of the final task */ my_sleep (SLEEPTIME); } /* end of if it is a final task*/ } /* end of omp task */ } /* end of for */ } /* end of single */ } /*end of parallel */ /* Now we ckeck if more than one thread executed the final task and its included task. */ for (i = 10; i < NUM_TASKS; i++) { if (tids[i] != includedtids[i]) { error++; } } return (error==0); } /* end of check_paralel_for_private */ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_task_final()) { num_failed++; } } return num_failed; }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(16*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(8*t1+Nx+13,128)),floord(16*t2+Nx+12,128)),floord(16*t1-16*t2+Nz+Nx+11,128));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),128*t4+126),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef 
LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_unaryop__abs_int32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_int32_int16
// op(A') function: GB_tran__abs_int32_int16

// C type:  int32_t
// A type:  int16_t
// cast:    int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int16_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets Cx [p] = GB_IABS ((int32_t) Ax [p]) for p = 0..anz-1, with a
// statically scheduled parallel loop over nthreads threads.  Returns
// GrB_NO_VALUE when this specialization is compiled out via GB_DISABLE.
GrB_Info GB_unop__abs_int32_int16
(
    int32_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unaryop_transpose.c template; the macros
// above specialize it for this type/operator pair.
GrB_Info GB_tran__abs_int32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_cherk.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zherk.c, normal z -> c, Fri Sep 28 17:38:21 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_herk
 *
 *  Performs one of the Hermitian rank k operations
 *
 *    \f[ C = \alpha A \times A^H + \beta C, \f]
 *    or
 *    \f[ C = \alpha A^H \times A + \beta C, \f]
 *
 *  where alpha and beta are real scalars, C is an n-by-n Hermitian
 *  matrix, and A is an n-by-k matrix in the first case and a k-by-n
 *  matrix in the second case.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans:   \f[ C = \alpha A \times A^H + \beta C; \f]
 *          - PlasmaConjTrans: \f[ C = \alpha A^H \times A + \beta C. \f]
 *
 * @param[in] n
 *          The order of the matrix C. n >= 0.
 *
 * @param[in] k
 *          If trans = PlasmaNoTrans, number of columns of the A matrix;
 *          if trans = PlasmaConjTrans, number of rows of the A matrix.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          A is an lda-by-ka matrix.
 *          If trans = PlasmaNoTrans,   ka = k;
 *          if trans = PlasmaConjTrans, ka = n.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          If trans = PlasmaNoTrans,   lda >= max(1, n);
 *          if trans = PlasmaConjTrans, lda >= max(1, k).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          C is an ldc-by-n matrix.
 *          On exit, the uplo part of the matrix is overwritten
 *          by the uplo part of the updated matrix.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1, n).
 *
 ******************************************************************************/
/* Thin wrapper over the CBLAS routine.  Declared weak so a platform build
 * can override it with an optimized definition at link time. */
__attribute__((weak))
void plasma_core_cherk(plasma_enum_t uplo, plasma_enum_t trans,
                       int n, int k,
                       float alpha, const plasma_complex32_t *A, int lda,
                       float beta,        plasma_complex32_t *C, int ldc)
{
    cblas_cherk(CblasColMajor,
                (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
                n, k,
                alpha, A, lda,
                beta, C, ldc);
}

/******************************************************************************/
/* Task-based variant: submits plasma_core_cherk as an OpenMP task with
 * dataflow dependences.  ak is the second dimension of A as referenced by
 * the operation (k when trans == PlasmaNoTrans, n otherwise), so the depend
 * clauses cover the lda*ak elements of A that are read and the ldc*n
 * elements of C that are updated.  The kernel is skipped when the owning
 * sequence has already recorded an error.
 * NOTE(review): `request` is unused here -- presumably kept for the uniform
 * core_omp_* interface; confirm against the other core_blas wrappers. */
void plasma_core_omp_cherk(plasma_enum_t uplo, plasma_enum_t trans,
                           int n, int k,
                           float alpha, const plasma_complex32_t *A, int lda,
                           float beta,        plasma_complex32_t *C, int ldc,
                           plasma_sequence_t *sequence, plasma_request_t *request)
{
    int ak;
    if (trans == PlasmaNoTrans)
        ak = k;
    else
        ak = n;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(inout:C[0:ldc*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_cherk(uplo, trans,
                              n, k,
                              alpha, A, lda,
                              beta, C, ldc);
    }
}
GB_unaryop__lnot_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_uint64_uint16
// op(A') function: GB_tran__lnot_uint64_uint16

// C type:  uint64_t
// A type:  uint16_t
// cast:    uint64_t cij = (uint64_t) aij
// unaryop: cij = !(aij != 0)

// type of the entries of the input matrix A
#define GB_ATYPE \
    uint16_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: 1 when the entry is zero, 0 otherwise)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets Cx [p] = !(((uint64_t) Ax [p]) != 0) for p = 0..anz-1, with a
// statically scheduled parallel loop over nthreads threads.  Returns
// GrB_NO_VALUE when this specialization is compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_uint64_uint16
(
    uint64_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unaryop_transpose.c template; the macros
// above specialize it for this type/operator pair.
GrB_Info GB_tran__lnot_uint64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__signum_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__signum_fc32_fc32
// op(A') function: GB_unop_tran__signum_fc32_fc32

// C type:  GxB_FC32_t
// A type:  GxB_FC32_t
// cast:    GxB_FC32_t cij = aij
// unaryop: cij = GB_csignumf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_csignumf (x) ;

// casting (no typecast needed: A and C share the same type)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_csignumf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets Cx [p] = GB_csignumf (Ax [p]) for p = 0..anz-1.  When A is bitmap
// (Ab != NULL), entries with Ab [p] == 0 are not present and are skipped.
// Returns GrB_NO_VALUE when this specialization is compiled out.
GrB_Info GB_unop_apply__signum_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op without typecast would reduce to a plain memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_csignumf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_csignumf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unop_transpose.c template; the macros above
// specialize it for this type/operator pair.
GrB_Info GB_unop_tran__signum_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
seidel.pluto.par.c
/* Tiled, OpenMP-parallel Gauss-Seidel-style 9-point sweep (PLUTO/CLooG
 * generated code, unroll-jammed 8x8 by an annotation tool).
 * N, REPS and T are not defined in this file -- presumably supplied at
 * compile time (e.g. -DN=.. -DT=.. -DREPS=..); TODO confirm build flags. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

/* The +17 extra columns presumably pad the row length to avoid cache-set
 * conflicts between consecutive rows -- not verified here. */
double A[N][N+17];

/* Seed the N x N prefix of A with the deterministic pattern i*i + j*j. */
void init_arrays()
{
    int i, j;
    for (i=0; i<N; i++)
        for (j=0; j<N; j++)
            A[i][j] = i*i+j*j;
}

/* Wall-clock time in seconds.
 * NOTE(review): `stat` is declared but never used, and the return value of
 * gettimeofday() is ignored. */
double rtclock()
{
    struct timezone tzp;
    struct timeval tp;
    int stat;
    gettimeofday (&tp, &tzp);
    return (tp.tv_sec + tp.tv_usec*1.0e-6);
}

/* Times REPS runs of the tiled sweep, prints the mean seconds per run, and
 * returns A[0][0] truncated to int (keeps the computation observable). */
int main()
{
    init_arrays();

    double annot_t_start=0, annot_t_end=0, annot_t_total=0;
    int annot_i;

    for (annot_i=0; annot_i<REPS; annot_i++)
    {
        annot_t_start = rtclock();

register int i,j,t;
#include <math.h>
#include <assert.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/* S1 is one in-place 9-point average of A[i][j] (the update reads a mix of
 * already-updated and not-yet-updated neighbors -- Seidel-style).  The first
 * six macro parameters are tile coordinates and are unused in the body. */
#define S1(zT0,zT1,zT2,zT3,zT4,zT5,t,i,j) {A[i][j]=(A[1+i][1+j]+A[1+i][j]+A[1+i][j-1]+A[i][1+j]+A[i][j]+A[i][j-1]+A[i-1][1+j]+A[i-1][j]+A[i-1][j-1])/9;}
int c1, c2, c3, c4, c5, c6, c7, c8, c9;
register int lb, ub, lb1, ub1, lb2, ub2;
register int lbv, ubv;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 4.44s.
 * Machine-generated loop nest kept verbatim below: c1..c7 iterate tiles of
 * the time-skewed schedule, c8/c9 are the point loops, unrolled 8x8 with
 * explicit remainder ("cleanup") loops. */
for (c1=-1;c1<=floord(2*T+N-4,256);c1++) {
  lb1=max(max(0,ceild(128*c1-127,256)),ceild(256*c1-T+1,256));
  ub1=min(min(floord(T+N-3,256),floord(256*c1+255,256)),floord(256*c1+N+253,512));
/* The generated schedule parallelizes the c2 tile loop for each fixed c1. */
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
  for (c2=lb1; c2<=ub1; c2++) {
    for (c3=max(max(max(max(ceild(128*c2-127,128),ceild(512*c2-N-252,256)),ceild(128*c1-127,128)),0),ceild(512*c1-512*c2-253,256));c3<=min(min(min(min(floord(256*c1-256*c2+N+253,128),floord(256*c1+N+508,256)),floord(T+N-3,128)),floord(512*c2+N+507,256)),floord(256*c2+T+N+252,256));c3++) {
      for (c4=max(max(max(max(8*c1-8*c2,ceild(128*c3-N-29,32)),0),ceild(-256*c2+256*c3-N-284,32)),ceild(256*c2-N-29,32));c4<=min(min(min(min(8*c1-8*c2+7,floord(T-1,32)),floord(128*c2+127,16)),floord(-128*c2+128*c3+127,16)),floord(256*c3+253,64));c4++) {
        for (c5=max(max(max(max(max(8*c2,0),ceild(16*c4-15,16)),ceild(256*c3-32*c4-N-60,32)),ceild(256*c3-T-N-28,32)),ceild(256*c3-N-59,64));c5<=min(min(min(min(min(floord(256*c3+N+252,64),floord(32*c4+N+29,32)),floord(128*c3-16*c4+127,16)),floord(T+N-3,32)),8*c2+7),floord(128*c3+127,16));c5++) {
          for (c6=max(max(max(max(max(ceild(64*c4-29,32),8*c3),ceild(16*c4+16*c5-15,16)),ceild(64*c5-N-28,32)),ceild(16*c5-15,16)),0);c6<=min(min(min(min(min(8*c3+7,floord(64*c5+N+59,32)),floord(T+N-3,16)),floord(32*c4+N+29,16)),floord(32*c4+32*c5+N+60,32)),floord(32*c5+T+N+28,32));c6++) {
            for (c7=max(max(max(max(0,32*c4),16*c6-N+2),32*c5-N+2),-32*c5+32*c6-N-29);c7<=min(min(min(min(32*c5+30,32*c4+31),floord(32*c6+29,2)),-32*c5+32*c6+30),T-1);c7++) {
/*@ begin Loop( transform UnrollJam(ufactor=8) for (c8=max(max(32*c5,32*c6-c7-N+2),c7+1);c8<=min(min(c7+N-2,32*c5+31),32*c6-c7+30);c8++) transform Unroll(ufactor=8) for (c9=max(32*c6,c7+c8+1);c9<=min(c7+c8+N-2,32*c6+31);c9++) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4-c5+c6,c7,-c7+c8,-c7-c8+c9) ; } ) @*/{
for (c8=max(max(32*c5,32*c6-c7-N+2),c7+1); c8<=min(min(c7+N-2,32*c5+31),32*c6-c7+30)-7; c8=c8+8) {
for (c9=max(32*c6,c7+c8+1);
c9<=min(c7+c8+N-2,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+5); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+6); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+7); } for (c9=max(32*c6,c7+c8+2); c9<=min(c7+c8+N-1,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8+2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8+3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8+4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8+5); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8+6); } for (c9=max(32*c6,c7+c8+3); c9<=min(c7+c8+N,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8-2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8+2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8+3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8+4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8+5); } for (c9=max(32*c6,c7+c8+4); c9<=min(c7+c8+N+1,32*c6+31)-7; c9=c9+8) {
S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8-3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8-2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8+2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8+3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8+4); } for (c9=max(32*c6,c7+c8+5); c9<=min(c7+c8+N+2,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8-4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8-3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8-2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8+2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8+3); } for (c9=max(32*c6,c7+c8+6); c9<=min(c7+c8+N+3,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8-5); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8-4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8-3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8-2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8+2); } for (c9=max(32*c6,c7+c8+7); c9<=min(c7+c8+N+4,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-6);
S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-5); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8+1); } for (c9=max(32*c6,c7+c8+8); c9<=min(c7+c8+N+5,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-7); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-6); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-5); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8); } for (; c9<=min(c7+c8+N-2,32*c6+31); c9=c9+1) S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8); for (; c9<=min(c7+c8+N-1,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+1,-c7+c9-c8-1); } for (; c9<=min(c7+c8+N,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+2,-c7+c9-c8-2); } for (; c9<=min(c7+c8+N+1,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+3,-c7+c9-c8-3); } for (; c9<=min(c7+c8+N+2,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+4,-c7+c9-c8-4); } for (; c9<=min(c7+c8+N+3,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+5,-c7+c9-c8-5); } for (; c9<=min(c7+c8+N+4,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+6,-c7+c9-c8-6); } for (;
c9<=min(c7+c8+N+5,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8+7,-c7+c9-c8-7); } } for (; c8<=min(min(c7+N-2,32*c5+31),32*c6-c7+30); c8=c8+1) { for (c9=max(32*c6,c7+c8+1); c9<=min(c7+c8+N-2,32*c6+31)-7; c9=c9+8) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+1); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+2); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+3); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+4); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+5); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+6); S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4+c6-c5,c7,-c7+c8,-c7+c9-c8+7); } for (; c9<=min(c7+c8+N-2,32*c6+31); c9=c9+1) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4-c5+c6,c7,-c7+c8,-c7-c8+c9); } } } /*@ end @*/ } } } } } } } /* End of CLooG code */

        annot_t_end = rtclock();
        annot_t_total += annot_t_end - annot_t_start;
    }

    annot_t_total = annot_t_total / REPS;
    printf("%f\n", annot_t_total);

    return ((int) A[0][0]);
}
gen_fffc.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /* ----------------------------------------------------------------------------- * generate AFF or AFC * ----------------------------------------------------------------------------- */ HYPRE_Int hypre_ParCSRMatrixGenerateFFFC( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts, hypre_ParCSRMatrix *S, hypre_ParCSRMatrix **A_FC_ptr, hypre_ParCSRMatrix **A_FF_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); /* diag part of S */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); /* off-diag part of S */ hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = 
hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *A_FC; hypre_CSRMatrix *A_FC_diag, *A_FC_offd; HYPRE_Int *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j=NULL; HYPRE_Complex *A_FC_diag_data, *A_FC_offd_data=NULL; HYPRE_Int num_cols_offd_A_FC; HYPRE_BigInt *col_map_offd_A_FC = NULL; hypre_ParCSRMatrix *A_FF; hypre_CSRMatrix *A_FF_diag, *A_FF_offd; HYPRE_Int *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j; HYPRE_Complex *A_FF_diag_data, *A_FF_offd_data; HYPRE_Int num_cols_offd_A_FF; HYPRE_BigInt *col_map_offd_A_FF = NULL; HYPRE_Int *fine_to_coarse; HYPRE_Int *fine_to_fine; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *fine_to_fine_offd = NULL; HYPRE_Int i, j, jj; HYPRE_Int startc, index; HYPRE_Int cpt, fpt, row; HYPRE_Int *CF_marker_offd = NULL, *marker_offd=NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_convert; HYPRE_BigInt *big_convert_offd = NULL; HYPRE_BigInt *big_buf_data = NULL; HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts; HYPRE_Int my_id, num_procs, num_sends; HYPRE_Int d_count_FF, d_count_FC, o_count_FF, o_count_FC; HYPRE_Int n_Fpts; HYPRE_Int *cpt_array, *fpt_array; HYPRE_Int start, stop; HYPRE_Int num_threads; num_threads = hypre_NumThreads(); /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); big_convert = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,jj,start,stop,row,cpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = (n_fine/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = 
(n_fine/num_threads)*(my_thread_num+1); } for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num+1]++; } else { fpt_array[my_thread_num+1]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i=1; i < num_threads; i++) { cpt_array[i+1] += cpt_array[i]; fpt_array[i+1] += fpt_array[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cpt = cpt_array[my_thread_num]; fpt = fpt_array[my_thread_num]; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = cpt++; fine_to_fine[i] = -1; } else { fine_to_fine[i] = fpt++; fine_to_coarse[i] = -1; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { n_Fpts = fpt_array[num_threads]; #ifdef HYPRE_NO_GLOBAL_PARTITION fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_MPI_Scan(&n_Fpts, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - n_Fpts; if (my_id == num_procs - 1) { total_global_fpts = fpts_starts[1]; total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_Fpts, 1, HYPRE_MPI_BIG_INT, &fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm); fpts_starts[0] = 0; for (i = 2; i < num_procs+1; i++) { fpts_starts[i] += fpts_starts[i-1]; } total_global_fpts = fpts_starts[num_procs]; total_global_cpts = cpts_starts[num_procs]; #endif } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { #ifdef HYPRE_NO_GLOBAL_PARTITION big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0]; #else big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[my_id]; #endif } else { #ifdef HYPRE_NO_GLOBAL_PARTITION big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0]; 
#else big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[my_id]; #endif } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd); hypre_ParCSRCommHandleDestroy(comm_handle); marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { marker_offd[S_offd_j[j]] = 1; } } } num_cols_offd_A_FC = 0; num_cols_offd_A_FF = 0; if (num_cols_A_offd) { for (i=0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] > 0 && marker_offd[i] > 0) { fine_to_coarse_offd[i] = num_cols_offd_A_FC++; fine_to_fine_offd[i] = -1; } else if (CF_marker_offd[i] < 0 && marker_offd[i] > 0) { fine_to_fine_offd[i] = num_cols_offd_A_FF++; fine_to_coarse_offd[i] = -1; } } 
col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST); col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST); cpt = 0; fpt = 0; for (i=0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] > 0 && marker_offd[i] > 0) { col_map_offd_A_FC[cpt++] = big_convert_offd[i]; } else if (CF_marker_offd[i] < 0 && marker_offd[i] > 0) { col_map_offd_A_FF[fpt++] = big_convert_offd[i]; } } } A_FF_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P); A_FC_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P); A_FF_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P); A_FC_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif d_count_FC = 0; d_count_FF = 0; o_count_FC = 0; o_count_FF = 0; row = fpt_array[my_thread_num]; for (i=start; i < stop; i++) { if (CF_marker[i] < 0) { row++; d_count_FF++; /* account for diagonal element */ for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { jj = S_diag_j[j]; if (CF_marker[jj] > 0) d_count_FC++; else d_count_FF++; } A_FF_diag_i[row] = d_count_FF; A_FC_diag_i[row] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { jj = S_offd_j[j]; if (CF_marker_offd[jj] > 0) o_count_FC++; else o_count_FF++; } A_FF_offd_i[row] = o_count_FF; A_FC_offd_i[row] = o_count_FC; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { HYPRE_Int fpt2; for (i=1; i<num_threads+1; i++) { fpt = fpt_array[i]; fpt2 = fpt_array[i-1]; if (fpt == fpt2) { continue; } A_FC_diag_i[fpt] += A_FC_diag_i[fpt2]; A_FF_diag_i[fpt] += A_FF_diag_i[fpt2]; A_FC_offd_i[fpt] += A_FC_offd_i[fpt2]; A_FF_offd_i[fpt] += A_FF_offd_i[fpt2]; } row = fpt_array[num_threads]; d_count_FC = A_FC_diag_i[row]; d_count_FF = A_FF_diag_i[row]; o_count_FC = A_FC_offd_i[row]; o_count_FF = A_FF_offd_i[row]; A_FF_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FF, memory_location_P); A_FC_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FC, 
memory_location_P); A_FF_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FF, memory_location_P); A_FC_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FC, memory_location_P); A_FF_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FF, memory_location_P); A_FC_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FC, memory_location_P); A_FF_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FF, memory_location_P); A_FC_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FC, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif row = fpt_array[my_thread_num]; d_count_FC = A_FC_diag_i[row]; d_count_FF = A_FF_diag_i[row]; o_count_FC = A_FC_offd_i[row]; o_count_FF = A_FF_offd_i[row]; for (i=start; i < stop; i++) { if (CF_marker[i] < 0) { HYPRE_Int jS, jA; row++; jA = A_diag_i[i]; A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]]; A_FF_diag_data[d_count_FF++] = A_diag_data[jA++]; for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { jA = A_diag_i[i]+1; jS = S_diag_j[j]; while (A_diag_j[jA] != jS) jA++; if (CF_marker[S_diag_j[j]] > 0) { A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]]; A_FC_diag_data[d_count_FC++] = A_diag_data[jA++]; } else { A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]]; A_FF_diag_data[d_count_FF++] = A_diag_data[jA++]; } } A_FF_diag_i[row] = d_count_FF; A_FC_diag_i[row] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { jA = A_offd_i[i]; jS = S_offd_j[j]; while (jS != A_offd_j[jA]) jA++; if (CF_marker_offd[S_offd_j[j]] > 0) { A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]]; A_FC_offd_data[o_count_FC++] = A_offd_data[jA++]; } else { A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]]; A_FF_offd_data[o_count_FF++] = A_offd_data[jA++]; } } A_FF_offd_i[row] = o_count_FF; A_FC_offd_i[row] = o_count_FC; } } } /*end parallel region */ A_FC = hypre_ParCSRMatrixCreate(comm, total_global_fpts, total_global_cpts, fpts_starts, cpts_starts, num_cols_offd_A_FC, A_FC_diag_i[n_Fpts], A_FC_offd_i[n_Fpts]); A_FF = hypre_ParCSRMatrixCreate(comm, 
total_global_fpts, total_global_fpts, fpts_starts, fpts_starts, num_cols_offd_A_FF, A_FF_diag_i[n_Fpts], A_FF_offd_i[n_Fpts]); A_FC_diag = hypre_ParCSRMatrixDiag(A_FC); hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data; hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i; hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j; A_FC_offd = hypre_ParCSRMatrixOffd(A_FC); hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data; hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i; hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j; hypre_ParCSRMatrixOwnsRowStarts(A_FC) = 1; hypre_ParCSRMatrixOwnsColStarts(A_FC) = 0; hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC; hypre_CSRMatrixMemoryLocation(A_FC_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P; A_FF_diag = hypre_ParCSRMatrixDiag(A_FF); hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data; hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i; hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j; A_FF_offd = hypre_ParCSRMatrixOffd(A_FF); hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data; hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i; hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j; hypre_ParCSRMatrixOwnsRowStarts(A_FF) = 0; hypre_ParCSRMatrixOwnsColStarts(A_FF) = 0; hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF; hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(big_convert, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST); hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(cpt_array, HYPRE_MEMORY_HOST); hypre_TFree(fpt_array, HYPRE_MEMORY_HOST); *A_FC_ptr = A_FC; *A_FF_ptr = A_FF; return hypre_error_flag; } /* 
----------------------------------------------------------------------------- * generate AFF, AFC, AFC2 for 2 stage interpolation * ----------------------------------------------------------------------------- */ HYPRE_Int hypre_ParCSRMatrixGenerateFFFC3( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts, hypre_ParCSRMatrix *S, hypre_ParCSRMatrix **A_FC_ptr, hypre_ParCSRMatrix **A_FF_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); /* diag part of S */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); /* off-diag part of S */ hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrix *A_FC; hypre_CSRMatrix *A_FC_diag, *A_FC_offd; HYPRE_Int *A_FC_diag_i, *A_FC_diag_j, *A_FC_offd_i, *A_FC_offd_j=NULL; HYPRE_Complex *A_FC_diag_data, *A_FC_offd_data=NULL; HYPRE_Int num_cols_offd_A_FC; HYPRE_BigInt *col_map_offd_A_FC = NULL; hypre_ParCSRMatrix *A_FF; hypre_CSRMatrix *A_FF_diag, *A_FF_offd; HYPRE_Int *A_FF_diag_i, *A_FF_diag_j, *A_FF_offd_i, *A_FF_offd_j; HYPRE_Complex *A_FF_diag_data, *A_FF_offd_data; HYPRE_Int 
num_cols_offd_A_FF; HYPRE_BigInt *col_map_offd_A_FF = NULL; HYPRE_Int *fine_to_coarse; HYPRE_Int *fine_to_fine; HYPRE_Int *fine_to_coarse_offd = NULL; HYPRE_Int *fine_to_fine_offd = NULL; HYPRE_Int i, j, jj; HYPRE_Int startc, index; HYPRE_Int cpt, fpt, new_fpt, row, rowc; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_convert; HYPRE_BigInt *big_convert_offd = NULL; HYPRE_BigInt *big_buf_data = NULL; HYPRE_BigInt total_global_fpts, total_global_cpts, total_global_new_fpts; HYPRE_BigInt *fpts_starts, *new_fpts_starts; HYPRE_Int my_id, num_procs, num_sends; HYPRE_Int d_count_FF, d_count_FC, o_count_FF, o_count_FC; HYPRE_Int n_Fpts; HYPRE_Int n_new_Fpts; HYPRE_Int *cpt_array, *fpt_array, *new_fpt_array; HYPRE_Int start, stop; HYPRE_Int num_threads; num_threads = hypre_NumThreads(); /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); fine_to_fine = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); big_convert = hypre_CTAlloc(HYPRE_BigInt, n_fine, HYPRE_MEMORY_HOST); cpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); new_fpt_array = hypre_CTAlloc(HYPRE_Int, num_threads+1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,jj,start,stop,row,rowc,cpt,new_fpt,fpt,d_count_FC,d_count_FF,o_count_FC,o_count_FF) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); start = (n_fine/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine/num_threads)*(my_thread_num+1); } for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { cpt_array[my_thread_num+1]++; } else if (CF_marker[i] == -2) { new_fpt_array[my_thread_num+1]++; fpt_array[my_thread_num+1]++; } else { fpt_array[my_thread_num+1]++; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if 
(my_thread_num == 0) { for (i=1; i < num_threads; i++) { cpt_array[i+1] += cpt_array[i]; fpt_array[i+1] += fpt_array[i]; new_fpt_array[i+1] += new_fpt_array[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cpt = cpt_array[my_thread_num]; fpt = fpt_array[my_thread_num]; for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = cpt++; fine_to_fine[i] = -1; } else { fine_to_fine[i] = fpt++; fine_to_coarse[i] = -1; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { n_Fpts = fpt_array[num_threads]; n_new_Fpts = new_fpt_array[num_threads]; #ifdef HYPRE_NO_GLOBAL_PARTITION fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); new_fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_MPI_Scan(&n_Fpts, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_MPI_Scan(&n_new_Fpts, new_fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - n_Fpts; new_fpts_starts[0] = new_fpts_starts[1] - n_new_Fpts; if (my_id == num_procs - 1) { total_global_new_fpts = new_fpts_starts[1]; total_global_fpts = fpts_starts[1]; total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_new_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else fpts_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); new_fpts_starts = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_Fpts, 1, HYPRE_MPI_BIG_INT, &fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm); hypre_MPI_Allgather(&n_new_Fpts, 1, HYPRE_MPI_BIG_INT, &new_fpts_starts[1], 1, HYPRE_MPI_BIG_INT, comm); fpts_starts[0] = 0; new_fpts_starts[0] = 0; for (i = 2; i < num_procs+1; i++) { fpts_starts[i] += fpts_starts[i-1]; new_fpts_starts[i] += new_fpts_starts[i-1]; } total_global_new_fpts = new_fpts_starts[num_procs]; 
total_global_fpts = fpts_starts[num_procs]; total_global_cpts = cpts_starts[num_procs]; #endif } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=start; i < stop; i++) { if (CF_marker[i] > 0) { #ifdef HYPRE_NO_GLOBAL_PARTITION big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[0]; #else big_convert[i] = (HYPRE_BigInt)fine_to_coarse[i] + cpts_starts[my_id]; #endif } else { #ifdef HYPRE_NO_GLOBAL_PARTITION big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[0]; #else big_convert[i] = (HYPRE_BigInt)fine_to_fine[i] + fpts_starts[my_id]; #endif } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); big_convert_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); fine_to_fine_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } index = 0; num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { startc = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = startc; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; big_buf_data[index++] = big_convert[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, big_convert_offd); hypre_ParCSRCommHandleDestroy(comm_handle); num_cols_offd_A_FC = 0; num_cols_offd_A_FF = 0; if (num_cols_A_offd) { for (i=0; i < 
num_cols_A_offd; i++) { if (CF_marker_offd[i] > 0) { fine_to_coarse_offd[i] = num_cols_offd_A_FC++; fine_to_fine_offd[i] = -1; } else { fine_to_fine_offd[i] = num_cols_offd_A_FF++; fine_to_coarse_offd[i] = -1; } } col_map_offd_A_FF = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FF, HYPRE_MEMORY_HOST); col_map_offd_A_FC = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A_FC, HYPRE_MEMORY_HOST); cpt = 0; fpt = 0; for (i=0; i < num_cols_A_offd; i++) { if (CF_marker_offd[i] > 0) { col_map_offd_A_FC[cpt++] = big_convert_offd[i]; } else { col_map_offd_A_FF[fpt++] = big_convert_offd[i]; } } } A_FF_diag_i = hypre_CTAlloc(HYPRE_Int,n_new_Fpts+1, memory_location_P); A_FC_diag_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P); A_FF_offd_i = hypre_CTAlloc(HYPRE_Int,n_new_Fpts+1, memory_location_P); A_FC_offd_i = hypre_CTAlloc(HYPRE_Int,n_Fpts+1, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif d_count_FC = 0; d_count_FF = 0; o_count_FC = 0; o_count_FF = 0; row = new_fpt_array[my_thread_num]; rowc = fpt_array[my_thread_num]; for (i=start; i < stop; i++) { if (CF_marker[i] == -2) { row++; rowc++; d_count_FF++; /* account for diagonal element */ for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { jj = S_diag_j[j]; if (CF_marker[jj] > 0) d_count_FC++; else d_count_FF++; } A_FF_diag_i[row] = d_count_FF; A_FC_diag_i[rowc] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { jj = S_offd_j[j]; if (CF_marker_offd[jj] > 0) o_count_FC++; else o_count_FF++; } A_FF_offd_i[row] = o_count_FF; A_FC_offd_i[rowc] = o_count_FC; } else if (CF_marker[i] == -1) { rowc++; for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { jj = S_diag_j[j]; if (CF_marker[jj] > 0) d_count_FC++; } A_FC_diag_i[rowc] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { jj = S_offd_j[j]; if (CF_marker_offd[jj] > 0) o_count_FC++; } A_FC_offd_i[rowc] = o_count_FC; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { HYPRE_Int fpt2, new_fpt2; for (i=1; 
i<num_threads+1; i++) { fpt = fpt_array[i]; new_fpt = new_fpt_array[i]; fpt2 = fpt_array[i-1]; new_fpt2 = new_fpt_array[i-1]; A_FC_diag_i[fpt] += A_FC_diag_i[fpt2]; A_FF_diag_i[new_fpt] += A_FF_diag_i[new_fpt2]; A_FC_offd_i[fpt] += A_FC_offd_i[fpt2]; A_FF_offd_i[new_fpt] += A_FF_offd_i[new_fpt2]; } row = new_fpt_array[num_threads]; rowc = fpt_array[num_threads]; d_count_FC = A_FC_diag_i[rowc]; d_count_FF = A_FF_diag_i[row]; o_count_FC = A_FC_offd_i[rowc]; o_count_FF = A_FF_offd_i[row]; A_FF_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FF, memory_location_P); A_FC_diag_j = hypre_CTAlloc(HYPRE_Int,d_count_FC, memory_location_P); A_FF_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FF, memory_location_P); A_FC_offd_j = hypre_CTAlloc(HYPRE_Int,o_count_FC, memory_location_P); A_FF_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FF, memory_location_P); A_FC_diag_data = hypre_CTAlloc(HYPRE_Real,d_count_FC, memory_location_P); A_FF_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FF, memory_location_P); A_FC_offd_data = hypre_CTAlloc(HYPRE_Real,o_count_FC, memory_location_P); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif row = new_fpt_array[my_thread_num]; rowc = fpt_array[my_thread_num]; d_count_FC = A_FC_diag_i[rowc]; d_count_FF = A_FF_diag_i[row]; o_count_FC = A_FC_offd_i[rowc]; o_count_FF = A_FF_offd_i[row]; for (i=start; i < stop; i++) { if (CF_marker[i] == -2) { HYPRE_Int jS, jA; row++; rowc++; jA = A_diag_i[i]; A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]]; A_FF_diag_data[d_count_FF++] = A_diag_data[jA++]; for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { jA = A_diag_i[i]+1; jS = S_diag_j[j]; while (A_diag_j[jA] != jS) jA++; if (CF_marker[S_diag_j[j]] > 0) { A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]]; A_FC_diag_data[d_count_FC++] = A_diag_data[jA++]; } else { A_FF_diag_j[d_count_FF] = fine_to_fine[A_diag_j[jA]]; A_FF_diag_data[d_count_FF++] = A_diag_data[jA++]; } } A_FF_diag_i[row] = d_count_FF; A_FC_diag_i[rowc] = d_count_FC; for (j = S_offd_i[i]; j < 
S_offd_i[i+1]; j++) { jA = A_offd_i[i]; jS = S_offd_j[j]; while (jS != A_offd_j[jA]) jA++; if (CF_marker_offd[S_offd_j[j]] > 0) { A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]]; A_FC_offd_data[o_count_FC++] = A_offd_data[jA++]; } else { A_FF_offd_j[o_count_FF] = fine_to_fine_offd[A_offd_j[jA]]; A_FF_offd_data[o_count_FF++] = A_offd_data[jA++]; } } A_FF_offd_i[row] = o_count_FF; A_FC_offd_i[rowc] = o_count_FC; } else if (CF_marker[i] == -1) { HYPRE_Int jS, jA; rowc++; for (j = S_diag_i[i]; j < S_diag_i[i+1]; j++) { jA = A_diag_i[i]+1; jS = S_diag_j[j]; while (A_diag_j[jA] != jS) jA++; if (CF_marker[S_diag_j[j]] > 0) { A_FC_diag_j[d_count_FC] = fine_to_coarse[A_diag_j[jA]]; A_FC_diag_data[d_count_FC++] = A_diag_data[jA++]; } } A_FC_diag_i[rowc] = d_count_FC; for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++) { jA = A_offd_i[i]; jS = S_offd_j[j]; while (jS != A_offd_j[jA]) jA++; if (CF_marker_offd[S_offd_j[j]] > 0) { A_FC_offd_j[o_count_FC] = fine_to_coarse_offd[A_offd_j[jA]]; A_FC_offd_data[o_count_FC++] = A_offd_data[jA++]; } } A_FC_offd_i[rowc] = o_count_FC; } } } /*end parallel region */ A_FC = hypre_ParCSRMatrixCreate(comm, total_global_fpts, total_global_cpts, fpts_starts, cpts_starts, num_cols_offd_A_FC, A_FC_diag_i[n_Fpts], A_FC_offd_i[n_Fpts]); A_FF = hypre_ParCSRMatrixCreate(comm, total_global_new_fpts, total_global_fpts, new_fpts_starts, fpts_starts, num_cols_offd_A_FF, A_FF_diag_i[n_new_Fpts], A_FF_offd_i[n_new_Fpts]); A_FC_diag = hypre_ParCSRMatrixDiag(A_FC); hypre_CSRMatrixData(A_FC_diag) = A_FC_diag_data; hypre_CSRMatrixI(A_FC_diag) = A_FC_diag_i; hypre_CSRMatrixJ(A_FC_diag) = A_FC_diag_j; A_FC_offd = hypre_ParCSRMatrixOffd(A_FC); hypre_CSRMatrixData(A_FC_offd) = A_FC_offd_data; hypre_CSRMatrixI(A_FC_offd) = A_FC_offd_i; hypre_CSRMatrixJ(A_FC_offd) = A_FC_offd_j; hypre_ParCSRMatrixOwnsRowStarts(A_FC) = 1; hypre_ParCSRMatrixOwnsColStarts(A_FC) = 0; hypre_ParCSRMatrixColMapOffd(A_FC) = col_map_offd_A_FC; hypre_CSRMatrixMemoryLocation(A_FC_diag) = 
memory_location_P; hypre_CSRMatrixMemoryLocation(A_FC_offd) = memory_location_P; A_FF_diag = hypre_ParCSRMatrixDiag(A_FF); hypre_CSRMatrixData(A_FF_diag) = A_FF_diag_data; hypre_CSRMatrixI(A_FF_diag) = A_FF_diag_i; hypre_CSRMatrixJ(A_FF_diag) = A_FF_diag_j; A_FF_offd = hypre_ParCSRMatrixOffd(A_FF); hypre_CSRMatrixData(A_FF_offd) = A_FF_offd_data; hypre_CSRMatrixI(A_FF_offd) = A_FF_offd_i; hypre_CSRMatrixJ(A_FF_offd) = A_FF_offd_j; hypre_ParCSRMatrixOwnsRowStarts(A_FF) = 1; hypre_ParCSRMatrixOwnsColStarts(A_FF) = 0; hypre_ParCSRMatrixColMapOffd(A_FF) = col_map_offd_A_FF; hypre_CSRMatrixMemoryLocation(A_FF_diag) = memory_location_P; hypre_CSRMatrixMemoryLocation(A_FF_offd) = memory_location_P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_fine, HYPRE_MEMORY_HOST); hypre_TFree(big_convert, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_fine_offd, HYPRE_MEMORY_HOST); hypre_TFree(big_convert_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(cpt_array, HYPRE_MEMORY_HOST); hypre_TFree(fpt_array, HYPRE_MEMORY_HOST); hypre_TFree(new_fpt_array, HYPRE_MEMORY_HOST); *A_FC_ptr = A_FC; *A_FF_ptr = A_FF; return hypre_error_flag; }
GB_unop__identity_uint16_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_uint16_fc32)
// op(A') function: GB (_unop_tran__identity_uint16_fc32)

// C type: uint16_t
// A type: GxB_FC32_t
// cast: uint16_t cij = GB_cast_to_uint16_t ((double) crealf (aij))
// unaryop: cij = aij

// NOTE: since the unary op is IDENTITY, all the work here is in the typecast:
// the real part of the single-complex input is taken (crealf) and then cast
// to uint16_t.

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint16_fc32)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            uint16_t z = GB_cast_to_uint16_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint16_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the generic transpose template is specialized by the GB_* macros
    // defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fc_kernel_fp16_arm82.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, Open AI Lab * Author: xlchen@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include <arm_neon.h> #include "fc_kernel_fp16_arm82.h" #include "compiler_fp16.h" void hgemv_1x8_a55(_fp16* biases, _fp16* input, _fp16* kernel, long kernel_size, _fp16* output); void hgemv_1x2_a55(_fp16* biases, _fp16* input, _fp16* kernel, long kernel_size, _fp16* output); // start and end channel must be 8 aligned void hgemv1x8(const _fp16* input, const _fp16* output, _fp16* weight_interleaved, const _fp16* biases, int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity) { int ch = 0; _fp16 *cur_kernel, *cur_biases, *cur_result; // #pragma omp parallel for num_threads(num_thread) for(ch = start_channel; ch < end_channel; ch += 8) { cur_kernel = ( _fp16* )(weight_interleaved + kernel_size * ch); cur_result = ( _fp16* )(output + ch); cur_biases = biases ? 
( _fp16* )(biases + ch) : NULL; hgemv_1x8_a55(cur_biases, ( _fp16* )input, cur_kernel, kernel_size, cur_result); // todo implement with A76 } } // start channel must be 2 aligned void hgemv1x2(const _fp16* input, const _fp16* output, _fp16* weight_interleaved, const _fp16* biases, int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity) { _fp16 sum; int ch = 0; _fp16 *cur_kernel, *cur_biases, *cur_result; for(ch = start_channel; ch < (end_channel & -2); ch += 2) { cur_kernel = ( _fp16* )(weight_interleaved + kernel_size * ch); cur_result = ( _fp16* )(output + ch); cur_biases = biases ? ( _fp16* )(biases + ch) : NULL; hgemv_1x2_a55(cur_biases, ( _fp16* )input, cur_kernel, kernel_size, cur_result); } if(end_channel & 0x1) { cur_kernel = ( _fp16* )(weight_interleaved + kernel_size * ch); cur_result = ( _fp16* )(output + ch); sum = biases ? *(biases + ch) : 0.f; for(int j = 0; j < kernel_size; j++) sum = sum + input[j] * cur_kernel[j]; *cur_result = sum; } } static void interleave_kernel(const _fp16* kernel, _fp16* kernel_interleaved, int out_chan, int kernel_size) { int i, j, k; _fp16* cur_kernel[8]; _fp16* cur_kernel_interleaved; // interleave 8 kernel for(i = 0; i < (out_chan & -8); i += 8) { for(j = 0; j < 8; j++) cur_kernel[j] = ( _fp16* )kernel + kernel_size * (i + j); cur_kernel_interleaved = ( _fp16* )kernel_interleaved + kernel_size * i; for(k = 0; k < kernel_size; k++) for(j = 0; j < 8; j++) cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k); } // interleave 2 kernel for(; i < (out_chan & -2); i += 2) { for(j = 0; j < 2; j++) cur_kernel[j] = ( _fp16* )kernel + kernel_size * (i + j); cur_kernel_interleaved = ( _fp16* )kernel_interleaved + kernel_size * i; for(k = 0; k < kernel_size; k++) for(j = 0; j < 2; j++) cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k); } // copy last kernel if(out_chan & 0x1) { cur_kernel[0] = ( _fp16* )kernel + kernel_size * i; cur_kernel_interleaved = ( _fp16* )kernel_interleaved + 
kernel_size * i; for(k = 0; k < kernel_size; k++) cur_kernel_interleaved[k] = *(cur_kernel[0] + k); } return; } int fp16_fc_kernel_prerun(struct ir_tensor* input_tensor , \ struct ir_tensor* filter_tensor , \ struct ir_tensor* output_tensor , \ struct fc_priv_info* priv_info , \ struct fc_param* param) { int num_output = param->num_output; int kernel_size = filter_tensor->dims[1]; int kernel_align = ((kernel_size + 1) & -2); if (!priv_info->interleave_buffer) { int mem_size = sizeof(_fp16) * num_output * kernel_align; void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (!priv_info->input_buffer) { int mem_size = sizeof(_fp16) * kernel_align; void* mem = sys_malloc(mem_size); priv_info->input_buffer = mem; priv_info->input_buffer_size = mem_size; } _fp16* filter_data = (_fp16*)filter_tensor->data; interleave_kernel(filter_data, (_fp16*)priv_info->interleave_buffer, num_output, kernel_size); return 0; } int fp16_fc_kernel_run(struct ir_tensor* input_tensor , \ struct ir_tensor* filter_tensor , \ struct ir_tensor* bias_tensor , \ struct ir_tensor* output_tensor , \ struct fc_priv_info* priv_info , \ struct fc_param* param, \ int num_thread, int cpu_affinity) { int out_num = param->num_output; int kernel_size = filter_tensor->dims[1]; _fp16* input = (_fp16*)input_tensor->data; _fp16* output = (_fp16*)output_tensor->data; _fp16* weight = (_fp16*)priv_info->interleave_buffer; _fp16* biases = NULL; if (bias_tensor) biases = (_fp16*)bias_tensor->data; int out_num_8 = out_num & ~7; for(int i = 0; i < input_tensor->dims[0]; i++) { _fp16* cur_input = input + i * kernel_size; _fp16* cur_output = output + i * out_num; hgemv1x8(cur_input, cur_output, weight, biases, kernel_size, 0, out_num_8, num_thread, cpu_affinity); if(out_num & 0x7) hgemv1x2(cur_input, cur_output, weight, biases, kernel_size, out_num_8, out_num, num_thread, cpu_affinity); } return 0 ; }
stencil.c
/* * Simple Stencil example * Main program example * * Brian J Gravelle * gravelle@cs.uoregon.edu * */ #include "mesh.h" #ifdef USE_CALI #include <caliper/cali.h> #endif #include <stdlib.h> #include <stdio.h> #include <omp.h> #ifndef X_SIZE #define X_SIZE 10000 #endif #ifndef Y_SIZE #define Y_SIZE 20000 #endif #ifndef TIME #define TIME 0.0 #endif #ifndef STEP #define STEP 1.0 #endif #ifndef TIME_STOP #define TIME_STOP 5.0 #endif #ifndef FILE_NAME #define FILE_NAME "openmp_results.txt" #endif #if USE_AOS // create and fill the mesh with starting values int init_mesh(struct Mesh ***mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif struct Mesh **_mesh; int err = FALSE; int i, j; double H = 100; double V = 1000; // allocate memory and verify that it worked _mesh = (struct Mesh**) malloc(x_size * sizeof(struct Mesh*)); if(_mesh == NULL) err = TRUE; for (i = 0; i < x_size; ++i) { _mesh[i] = (struct Mesh*) malloc(y_size * sizeof(struct Mesh)); if(_mesh[i] == NULL) err = TRUE; } // define starting values for (i = 0; i < x_size; i++) { V = 1000; for (j = 0; j < y_size; j++) { ACCESS_MESH(_mesh, i, j, avg) = H; ACCESS_MESH(_mesh, i, j, sum) = V; ACCESS_MESH(_mesh, i, j, pde) = i*j; ACCESS_MESH(_mesh, i, j, dep) = H+V; // _mesh[i][j].avg = H; // _mesh[i][j].sum = V; // _mesh[i][j].pde = i*j; // _mesh[i][j].dep = H+V; V += 1000; } H += 100; } *mesh = _mesh; return err; } // liberate the memory void free_mesh(MESH mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i; for (i = 0; i < x_size; ++i) { free(mesh[i]); } free(mesh); } #else // USE_SOA // create and fill the mesh with starting values int init_mesh(struct Mesh *mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int err = FALSE; int i, j; double H = 100; double V = 1000; // allocate memory and verify that it worked mesh->avg = (double**) malloc(x_size * sizeof(double*)); mesh->sum = (double**) malloc(x_size * sizeof(double*)); 
mesh->pde = (double**) malloc(x_size * sizeof(double*)); mesh->dep = (double**) malloc(x_size * sizeof(double*)); if(mesh->avg == NULL) err = TRUE; if(mesh->sum == NULL) err = TRUE; if(mesh->pde == NULL) err = TRUE; if(mesh->dep == NULL) err = TRUE; for (i = 0; i < x_size; ++i) { mesh->avg[i] = (double*) malloc(y_size * sizeof(double)); mesh->sum[i] = (double*) malloc(y_size * sizeof(double)); mesh->pde[i] = (double*) malloc(y_size * sizeof(double)); mesh->dep[i] = (double*) malloc(y_size * sizeof(double)); if(mesh->avg[i] == NULL) err = TRUE; if(mesh->sum[i] == NULL) err = TRUE; if(mesh->pde[i] == NULL) err = TRUE; if(mesh->dep[i] == NULL) err = TRUE; } // define starting values for (i = 0; i < x_size; i++) { V = 1000; for (j = 0; j < y_size; j++) { mesh->sum[i][j] = H; mesh->avg[i][j] = V; mesh->pde[i][j] = i*j; mesh->dep[i][j] = H+V; V += 1000; } H += 100; } return err; } // liberate the memory void free_mesh(struct Mesh mesh, int x_size, int y_size){ #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i; for (i = 0; i < x_size; ++i) { free(mesh.avg[i]); free(mesh.sum[i]); free(mesh.pde[i]); free(mesh.dep[i]); } free(mesh.avg); free(mesh.sum); free(mesh.pde); free(mesh.dep); } #endif double pythag(double x1, double y1, double x2, double y2) { return (x1-x2)*(x1-x2)+(y1-y2)*(y1-y2); } // perform one iteration of the timestep void do_timestep(MESH mesh, MESH temp_mesh, int x_size, int y_size, double time, double dt) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int thr_id; double dt2 = dt*dt; double C = 0.25, dx2 = 1.0; int _x, _y, n, i, j, t; // looping over all rows of the matrix // main source of paralleism #pragma omp parallel private(_y, n, t, thr_id, dx2) { // establish temporary mesh for this thread thr_id = omp_get_thread_num(); int neighbors[NUM_NEIGHBORS][2]; #ifdef USE_CALI CALI_MARK_BEGIN("computation"); #endif #pragma omp for for (_x = 0; _x < x_size; _x++) { // fill next temp row with starting values for (_y = 0; _y < y_size; _y++) { 
ACCESS_MESH(temp_mesh, _x, _y, avg) = 0; ACCESS_MESH(temp_mesh, _x, _y, sum) = 0; ACCESS_MESH(temp_mesh, _x, _y, pde) = -2*dt2 * ACCESS_MESH(mesh, _x, _y, pde) * C; ACCESS_MESH(temp_mesh, _x, _y, dep) = -2*dt2 * ACCESS_MESH(mesh, _x, _y, dep) * C; } // actually do some computation for (_y = 0; _y < y_size; _y++) { get_neighbors(x_size, y_size, _x, _y, neighbors); for(n = 0; n < NUM_NEIGHBORS; n++) { ACCESS_MESH(temp_mesh, _x, _y, avg) += ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], avg); } ACCESS_MESH(temp_mesh, _x, _y, avg) /= NUM_NEIGHBORS; for(n = 0; n < NUM_NEIGHBORS; n++) { ACCESS_MESH(temp_mesh, _x, _y, sum) += ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], sum)/NUM_NEIGHBORS; } for(n = 0; n < NUM_NEIGHBORS; n++){ dx2 = pythag(_x, _y, neighbors[n][X], neighbors[n][Y]); // dx^2 ACCESS_MESH(temp_mesh, _x, _y, pde) += (-2*dt2 * ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], pde)) / ((dx2 + 1.0) * C); } for(n = 0; n < NUM_NEIGHBORS; n++){ dx2 = pythag(_x, _y, neighbors[n][X], neighbors[n][Y]); // dx^2 ACCESS_MESH(temp_mesh, _x, _y, dep) += (ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], avg)*dt2 * \ ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], dep)) / \ ((dx2 + ACCESS_MESH(mesh, neighbors[n][X], neighbors[n][Y], sum)) * C); } } // _y loop } // _x loop #ifdef USE_CALI CALI_MARK_END("computation"); #endif } // parallel region } // do time step // print the mesh void print_mesh(MESH mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i, j; for (i = 0; i < x_size; i++) { printf("x = %d\n", i); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, avg)); } printf("\n"); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, sum)); } printf("\n"); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, pde)); } printf("\n"); for (j = 0; j < y_size; j++) { printf("%10.2e ", ACCESS_MESH(mesh, i, j, dep)); } printf("\n\n"); } } // print the mesh to file void 
output_mesh(FILE* file, MESH mesh, int x_size, int y_size) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int i, j; for (i = 0; i < x_size; i++) { fprintf(file, "x = %d\n", i); for (j = 0; j < y_size; j++) { fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, avg)); } fprintf(file, "\n"); for (j = 0; j < y_size; j++) { fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, sum)); } fprintf(file, "\n"); for (j = 0; j < y_size; j++) { fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, pde)); } fprintf(file, "\n"); for (j = 0; j < y_size; j++) { fprintf(file, "%10.2e ", ACCESS_MESH(mesh, i, j, dep)); } fprintf(file, "\n\n"); } } int test_small_mesh() { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int err = FALSE; MESH mesh_1; MESH mesh_2; int x_size = 5; int y_size = 10; double time = 0.0; printf("init_mesh...\n"); err = err | init_mesh(&mesh_1, x_size, y_size); err = err | init_mesh(&mesh_2, TEMP_ROWS, y_size); #if USE_AOS if(mesh_1 == NULL) return 1; if(mesh_2 == NULL) return 1; #else if(mesh_1.avg == NULL) return 1; if(mesh_2.avg == NULL) return 1; if(mesh_1.sum == NULL) return 1; if(mesh_2.sum == NULL) return 1; if(mesh_1.pde == NULL) return 1; if(mesh_2.pde == NULL) return 1; if(mesh_1.dep == NULL) return 1; if(mesh_2.dep == NULL) return 1; #endif printf("print_mesh...\n"); print_mesh(mesh_1, x_size, y_size); printf("do_timestep...\n"); do_timestep(mesh_1, mesh_2, x_size, y_size, time, 1.0); printf("print_mesh...\n"); print_mesh(mesh_1, x_size, y_size); printf("free_mesh...\n"); free_mesh(mesh_1, x_size, y_size); free_mesh(mesh_2, TEMP_ROWS, y_size); return err; } int run_custom_mesh(int x_size, int y_size, double time, double step, double time_stop) { #ifdef USE_CALI CALI_CXX_MARK_FUNCTION; #endif int err = FALSE; MESH main_mesh; MESH temp_mesh; double wall_tot_start, wall_tot_end; double wall_init_start, wall_init_end; double wall_step_start, wall_step_end; double wall_free_start, wall_free_end; int num_thr; omp_set_num_threads(4); #pragma omp parallel { 
if(omp_get_thread_num()==0) num_thr = omp_get_num_threads(); } printf("\n\nRunning new Stencil with \n\ x_size = %d \n\ y_size = %d \n\ num_thr = %d \n\ start time = %f \n\ time step = %f \n\ end time = %f \n\n", x_size, y_size, num_thr, time, step, time_stop); wall_tot_start = omp_get_wtime(); wall_init_start = omp_get_wtime(); printf("init_mesh......."); fflush(stdout); err = err | init_mesh(&main_mesh, x_size, y_size); err = err | init_mesh(&temp_mesh, x_size, y_size); #if USE_AOS if(main_mesh == NULL) return 1; if(temp_mesh == NULL) return 1; #else if(main_mesh.avg == NULL) return 1; if(temp_mesh.avg == NULL) return 1; if(main_mesh.sum == NULL) return 1; if(temp_mesh.sum == NULL) return 1; if(main_mesh.pde == NULL) return 1; if(temp_mesh.pde == NULL) return 1; if(main_mesh.dep == NULL) return 1; if(temp_mesh.dep == NULL) return 1; #endif wall_init_end = omp_get_wtime(); printf("%fs\n", (wall_init_end - wall_init_start)); #ifdef DO_IO printf("output to file....."); fflush(stdout); double io_start = omp_get_wtime(); FILE* file = fopen(FILE_NAME, "w+"); fprintf(file, "\n\nRunning new Stencil with \n\ x_size = %d \n\ y_size = %d \n\ start time = %f \n\ time step = %f \n\ end time = %f \n\n", x_size, y_size, time, step, time_stop); output_mesh(file, main_mesh, x_size, y_size); printf("%fs\n", (omp_get_wtime() - io_start)); #endif while(time < time_stop) { printf("timestep %.2f...", time); fflush(stdout); wall_step_start = omp_get_wtime(); do_timestep(main_mesh, temp_mesh, x_size, y_size, time, step); time += step; wall_step_end = omp_get_wtime(); printf("%fs\n", (wall_step_end - wall_step_start)); printf("timestep %.2f...", time); fflush(stdout); wall_step_start = omp_get_wtime(); do_timestep(temp_mesh, main_mesh, x_size, y_size, time, step); time += step; wall_step_end = omp_get_wtime(); printf("%fs\n", (wall_step_end - wall_step_start)); } #ifdef DO_IO io_start = omp_get_wtime(); printf("output to file....."); fflush(stdout); fprintf(file, "\n\n"); 
output_mesh(file, main_mesh, x_size, y_size); fprintf(file, "\n\n"); fclose(file); printf("%fs\n", (omp_get_wtime() - io_start)); #endif printf("free_mesh.......\n"); fflush(stdout); wall_free_start = omp_get_wtime(); free_mesh(main_mesh, x_size, y_size); free_mesh(temp_mesh, x_size, y_size); wall_free_end = omp_get_wtime(); printf("%fs\n", (wall_free_end - wall_free_start)); wall_tot_end = omp_get_wtime(); printf("\n total time: %fs\n", (wall_tot_end - wall_tot_start)); return err; } // main function int main(int argc, char **argv) { #ifdef USE_CALI cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS); #pragma omp parallel { cali_set_int(thread_attr, omp_get_thread_num()); } #endif int err = FALSE; // err = err | test_small_mesh(); // printf("\n\n"); err = err | run_custom_mesh(X_SIZE, Y_SIZE, TIME, STEP, TIME_STOP); return err; }
zgetri.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_getri
 *
 * Computes the inverse of a matrix A using the LU factorization computed
 * by plasma_zgetrf.
 *
 *******************************************************************************
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in,out] pA
 *          On entry, the LU factors computed by plasma_zgetrf.
 *          On exit, the inverse of A, overwriting the factors.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 * @param[in] ipiv
 *          The pivot indices computed by plasma_zgetrf.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the (i,i) element of the factor U or L is
 *         zero, and the inverse could not be computed.
 *
 *******************************************************************************
 *
 * @sa plasma_cgetri
 * @sa plasma_dgetri
 * @sa plasma_sgetri
 *
 ******************************************************************************/
int plasma_zgetri(int n, plasma_complex64_t *pA, int lda, int *ipiv)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -3;
    }

    // Quick return. (n < 0 was already rejected above, so a plain
    // equality test replaces the previous imax(n, 0) == 0 form.)
    if (n == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_getrf(plasma, PlasmaComplexDouble, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices: A (n x n) and the workspace W (n x nb).
    plasma_desc_t A;
    plasma_desc_t W;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nb, 0, 0, n, nb, &W);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence. The status was previously assigned but never
    // checked; propagate a failure and release the descriptors.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&W);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request. Same treatment: check the previously ignored status.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&W);
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);

        // Perform computation.
        plasma_omp_zgetri(A, ipiv, W, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&W);
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * Computes the inverse of a matrix A using the LU factorization.
 * Non-blocking tile version of plasma_zgbsv().
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          On entry, the LU factors computed by plasma_zgetrf.
 *          On exit, the inverse of A, overwriting the factors.
 *
 * @param[in] ipiv
 *          The pivot indices computed by plasma_zgetrf.
 *
 * @param[out] W
 *          Workspace of dimension (n, nb)
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zgetri
 * @sa plasma_omp_zgetri
 * @sa plasma_omp_cgetri
 * @sa plasma_omp_dgetri
 * @sa plasma_omp_sgetri
 *
 ******************************************************************************/
void plasma_omp_zgetri(plasma_desc_t A, int *ipiv, plasma_desc_t W,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check sequence and request first: the remaining checks report errors
    // through plasma_request_fail(), which needs valid pointers. (The
    // original code passed a possibly-NULL sequence/request to it.)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check the remaining input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(W) != PlasmaSuccess) {
        plasma_error("invalid W");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0)
        return;

    // Invert triangular part.
    plasma_pztrtri(PlasmaUpper, PlasmaNonUnit, A, sequence, request);

    // Compute product of inverse of the upper and lower triangles.
    plasma_pzgetri_aux(A, W, sequence, request);

    // Apply pivot.
    plasma_pzgeswp(PlasmaColumnwise, A, ipiv, -1, sequence, request);
}
loop-12.c
/* { dg-do run } */ #include <omp.h> extern void abort (void); #define LLONG_MAX __LONG_LONG_MAX__ #define ULLONG_MAX (LLONG_MAX * 2ULL + 1) #define INT_MAX __INT_MAX__ int arr[6 * 5]; void set (int loopidx, int idx) { #pragma omp atomic arr[loopidx * 5 + idx]++; } #define check(var, val, loopidx, idx) \ if (var == (val)) set (loopidx, idx); else #define test(loopidx, count) \ for (idx = 0; idx < 5; idx++) \ if (arr[loopidx * 5 + idx] != idx < count) \ abort (); \ else \ arr[loopidx * 5 + idx] = 0 int test1 (void) { int e = 0, idx; #pragma omp parallel reduction(+:e) { long long i; unsigned long long j; #pragma omp for schedule(dynamic,1) nowait for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000) { check (i, LLONG_MAX - 30001, 0, 0) check (i, LLONG_MAX - 20001, 0, 1) check (i, LLONG_MAX - 10001, 0, 2) e = 1; } #pragma omp for schedule(dynamic,1) nowait for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000) { check (i, -LLONG_MAX + 30000, 1, 0) check (i, -LLONG_MAX + 20000, 1, 1) check (i, -LLONG_MAX + 10000, 1, 2) e = 1; } #pragma omp for schedule(dynamic,1) nowait for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL) { check (j, 20, 2, 0) e = 1; } #pragma omp for schedule(dynamic,1) nowait for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL) { check (j, ULLONG_MAX - 3, 3, 0) e = 1; } #pragma omp for schedule(dynamic,1) nowait for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL) { check (j, LLONG_MAX - 20000ULL, 4, 0) check (j, LLONG_MAX - 10000ULL, 4, 1) check (j, LLONG_MAX, 4, 2) check (j, LLONG_MAX + 10000ULL, 4, 3) e = 1; } #pragma omp for schedule(dynamic,1) nowait for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL) { check (i, -3LL * INT_MAX - 20000LL, 5, 0) check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1) check (i, -INT_MAX - 20000LL + 400LL, 5, 2) check (i, -20000LL + 600LL, 5, 3) check (i, INT_MAX - 20000LL + 800LL, 5, 4) e = 1; } } if (e) abort (); test (0, 
3); test (1, 3); test (2, 1); test (3, 1); test (4, 4); test (5, 5); return 0; } int test2 (void) { int e = 0, idx; #pragma omp parallel reduction(+:e) { long long i; unsigned long long j; #pragma omp for schedule(guided,1) nowait for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000) { check (i, LLONG_MAX - 30001, 0, 0) check (i, LLONG_MAX - 20001, 0, 1) check (i, LLONG_MAX - 10001, 0, 2) e = 1; } #pragma omp for schedule(guided,1) nowait for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000) { check (i, -LLONG_MAX + 30000, 1, 0) check (i, -LLONG_MAX + 20000, 1, 1) check (i, -LLONG_MAX + 10000, 1, 2) e = 1; } #pragma omp for schedule(guided,1) nowait for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL) { check (j, 20, 2, 0) e = 1; } #pragma omp for schedule(guided,1) nowait for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL) { check (j, ULLONG_MAX - 3, 3, 0) e = 1; } #pragma omp for schedule(guided,1) nowait for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL) { check (j, LLONG_MAX - 20000ULL, 4, 0) check (j, LLONG_MAX - 10000ULL, 4, 1) check (j, LLONG_MAX, 4, 2) check (j, LLONG_MAX + 10000ULL, 4, 3) e = 1; } #pragma omp for schedule(guided,1) nowait for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL) { check (i, -3LL * INT_MAX - 20000LL, 5, 0) check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1) check (i, -INT_MAX - 20000LL + 400LL, 5, 2) check (i, -20000LL + 600LL, 5, 3) check (i, INT_MAX - 20000LL + 800LL, 5, 4) e = 1; } } if (e) abort (); test (0, 3); test (1, 3); test (2, 1); test (3, 1); test (4, 4); test (5, 5); return 0; } int test3 (void) { int e = 0, idx; #pragma omp parallel reduction(+:e) { long long i; unsigned long long j; #pragma omp for schedule(static) nowait for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000) { check (i, LLONG_MAX - 30001, 0, 0) check (i, LLONG_MAX - 20001, 0, 1) check (i, LLONG_MAX - 10001, 0, 2) e = 1; } #pragma omp for 
schedule(static) nowait for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000) { check (i, -LLONG_MAX + 30000, 1, 0) check (i, -LLONG_MAX + 20000, 1, 1) check (i, -LLONG_MAX + 10000, 1, 2) e = 1; } #pragma omp for schedule(static) nowait for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL) { check (j, 20, 2, 0) e = 1; } #pragma omp for schedule(static) nowait for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL) { check (j, ULLONG_MAX - 3, 3, 0) e = 1; } #pragma omp for schedule(static) nowait for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL) { check (j, LLONG_MAX - 20000ULL, 4, 0) check (j, LLONG_MAX - 10000ULL, 4, 1) check (j, LLONG_MAX, 4, 2) check (j, LLONG_MAX + 10000ULL, 4, 3) e = 1; } #pragma omp for schedule(static) nowait for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL) { check (i, -3LL * INT_MAX - 20000LL, 5, 0) check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1) check (i, -INT_MAX - 20000LL + 400LL, 5, 2) check (i, -20000LL + 600LL, 5, 3) check (i, INT_MAX - 20000LL + 800LL, 5, 4) e = 1; } } if (e) abort (); test (0, 3); test (1, 3); test (2, 1); test (3, 1); test (4, 4); test (5, 5); return 0; } int test4 (void) { int e = 0, idx; #pragma omp parallel reduction(+:e) { long long i; unsigned long long j; #pragma omp for schedule(static,1) nowait for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000) { check (i, LLONG_MAX - 30001, 0, 0) check (i, LLONG_MAX - 20001, 0, 1) check (i, LLONG_MAX - 10001, 0, 2) e = 1; } #pragma omp for schedule(static,1) nowait for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000) { check (i, -LLONG_MAX + 30000, 1, 0) check (i, -LLONG_MAX + 20000, 1, 1) check (i, -LLONG_MAX + 10000, 1, 2) e = 1; } #pragma omp for schedule(static,1) nowait for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL) { check (j, 20, 2, 0) e = 1; } #pragma omp for schedule(static,1) nowait for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= 
LLONG_MAX + 50ULL) { check (j, ULLONG_MAX - 3, 3, 0) e = 1; } #pragma omp for schedule(static,1) nowait for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL) { check (j, LLONG_MAX - 20000ULL, 4, 0) check (j, LLONG_MAX - 10000ULL, 4, 1) check (j, LLONG_MAX, 4, 2) check (j, LLONG_MAX + 10000ULL, 4, 3) e = 1; } #pragma omp for schedule(static,1) nowait for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += INT_MAX + 200LL) { check (i, -3LL * INT_MAX - 20000LL, 5, 0) check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1) check (i, -INT_MAX - 20000LL + 400LL, 5, 2) check (i, -20000LL + 600LL, 5, 3) check (i, INT_MAX - 20000LL + 800LL, 5, 4) e = 1; } } if (e) abort (); test (0, 3); test (1, 3); test (2, 1); test (3, 1); test (4, 4); test (5, 5); return 0; } int test5 (void) { int e = 0, idx; #pragma omp parallel reduction(+:e) { long long i; unsigned long long j; #pragma omp for schedule(runtime) nowait for (i = LLONG_MAX - 30001; LLONG_MAX - 10001 >= i; i += 10000) { check (i, LLONG_MAX - 30001, 0, 0) check (i, LLONG_MAX - 20001, 0, 1) check (i, LLONG_MAX - 10001, 0, 2) e = 1; } #pragma omp for schedule(runtime) nowait for (i = -LLONG_MAX + 30000; -LLONG_MAX + 10000 <= i; i -= 10000) { check (i, -LLONG_MAX + 30000, 1, 0) check (i, -LLONG_MAX + 20000, 1, 1) check (i, -LLONG_MAX + 10000, 1, 2) e = 1; } #pragma omp for schedule(runtime) nowait for (j = 20; LLONG_MAX - 70 >= j; j += LLONG_MAX + 50ULL) { check (j, 20, 2, 0) e = 1; } #pragma omp for schedule(runtime) nowait for (j = ULLONG_MAX - 3; LLONG_MAX + 70ULL <= j; j -= LLONG_MAX + 50ULL) { check (j, ULLONG_MAX - 3, 3, 0) e = 1; } #pragma omp for schedule(runtime) nowait for (j = LLONG_MAX - 20000ULL; LLONG_MAX + 10000ULL >= j; j += 10000ULL) { check (j, LLONG_MAX - 20000ULL, 4, 0) check (j, LLONG_MAX - 10000ULL, 4, 1) check (j, LLONG_MAX, 4, 2) check (j, LLONG_MAX + 10000ULL, 4, 3) e = 1; } #pragma omp for schedule(runtime) nowait for (i = -3LL * INT_MAX - 20000LL; INT_MAX + 10000LL >= i; i += 
INT_MAX + 200LL) { check (i, -3LL * INT_MAX - 20000LL, 5, 0) check (i, -2LL * INT_MAX - 20000LL + 200LL, 5, 1) check (i, -INT_MAX - 20000LL + 400LL, 5, 2) check (i, -20000LL + 600LL, 5, 3) check (i, INT_MAX - 20000LL + 800LL, 5, 4) e = 1; } } if (e) abort (); test (0, 3); test (1, 3); test (2, 1); test (3, 1); test (4, 4); test (5, 5); return 0; } int main (void) { if (2 * sizeof (int) != sizeof (long long)) return 0; test1 (); test2 (); test3 (); test4 (); omp_set_schedule (omp_sched_static, 0); test5 (); omp_set_schedule (omp_sched_static, 3); test5 (); omp_set_schedule (omp_sched_dynamic, 5); test5 (); omp_set_schedule (omp_sched_guided, 2); test5 (); return 0; }
parallelMain.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <omp.h> #define TOL 10e-3 void printMatrix(double *matrix, int xDim, int yDim, FILE* fout) { int i = 0, j = 0; for (i = 0; i < xDim; i++) { for (j = 0; j < yDim; j++) { fprintf(fout, "%lf ", *(matrix + i * yDim + j)); } fprintf(fout, "\n"); } fclose(fout); } void printSparseMatrix(double *matrix, int xDim, int yDim, FILE* fout) { int i = 0, j = 0; for (i = 0; i < xDim; i++) { for (j = 0; j < yDim; j++) { double element = *(matrix + i * yDim + j); if (element != 0.0) { fprintf(fout, "(%d %d) %lf\n", i, j, *(matrix + i * yDim + j)); } } } fclose(fout); } double triangleArea(double vertices[][2]) { double area = 0; area = 0.5 * ((vertices[1][0] - vertices[0][0]) * (vertices[2][1] - vertices[0][1]) - (vertices[2][0] - vertices[0][0]) * (vertices[1][1] - vertices[0][1])); area = fabs(area); return area; } void localVector(double vertices[][2], double f, double localB[]) { int i; double area = triangleArea(vertices); for (i = 0; i < 3; i++) { localB[i] = (1.0 / 3) * area * f; } } double scalarProduct(double x[], double y[], int length) { int i = 0; double res = 0; for (i = 0; i < length; i++) { res += x[i] * y[i]; } return res; } void localStiffnessMatrix(double vertices[][2], double localW[][3]) { int i = 0, j = 0; double edges[3][2]; edges[0][0] = vertices[2][0] - vertices[1][0]; edges[0][1] = vertices[2][1] - vertices[1][1]; edges[1][0] = vertices[0][0] - vertices[2][0]; edges[1][1] = vertices[0][1] - vertices[2][1]; edges[2][0] = vertices[1][0] - vertices[0][0]; edges[2][1] = vertices[1][1] - vertices[0][1]; double area = triangleArea(vertices); for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { localW[i][j] = 1 / (4 * area) * scalarProduct(edges[i], edges[j], 2); } } } int lineCount(FILE* file) { int lines = 0; char string[100]; rewind(file); while (fgets(string, sizeof(string), file) != NULL) { lines++; } return lines; } double* readMatrixFile(FILE* file, int 
rows, int columns) { int j = 0, i = 0; char string[100]; rewind(file); double* matrix = (double*) malloc(rows * columns * sizeof(double)); while (fgets(string, sizeof(string), file) != NULL) { for (j = 0; j < columns; j++) { *(matrix + i * columns + j) = atof( strtok(j == 0 ? string : NULL, ",")); } i++; } return matrix; } double* zeros(int dim) { int i = 0; double* vector __attribute__ ((aligned (16))); vector = (double*) malloc(sizeof(double) * dim); #pragma omp parallel for private(i) schedule(static) for (i = 0; i < dim; i++) { vector[i] = 0; } return vector; } void gaussianElimination(int n, double* matrix, double b[]){ int j, i, k; double aux; /* Gaussian elimination */ for (j = 0; j < (n - 1); j++) { // prova sullo static, default è dynamic #pragma omp parallel for private(aux, i, k) schedule(static) for (i = (j + 1); i < n; i++) { aux = *(matrix + n * i + j) / *(matrix + n * j + j); for (k = j; k < n; k++) { *(matrix + n * i + k) -= (aux * (*(matrix + n * j + k))); } b[i] -= (aux * b[j]); } } } double* backSubstitution(int n, double* matrix, double b[]) { int i, j; double temp; double* x = zeros(n); /* Back substitution */ x[n - 1] = b[n - 1] / *(matrix + n * (n - 1) + n - 1); for (i = (n - 2); i >= 0; i--) { temp = b[i]; for (j = (i + 1); j < n; j++) { temp -= (*(matrix + n * i + j) * x[j]); } x[i] = temp / *(matrix + n * i + i); } return x; } void assignDirichletCondition(int meshSize, double g, double* meshPoints, double* w, double* b) { int i, j; #pragma omp parallel for private(i, j) schedule(dynamic) for (i = 0; i < meshSize; i++) { double x = *(meshPoints + 2 * i); double y = *(meshPoints + 2 * i + 1); if (fabs(x * x + y * y - 1) < TOL) { for (j = 0; j < meshSize; j++) { *(w + meshSize * i + j) = (i == j) ? 
1 : 0; } b[i] = g; } } } double* solvePDE(char fileP[], char fileT[], FILE* result) { int vertexSize, globalVertex2, globalVertex, meshSize; int tri = 0, i = 0, j = 0; double f = -4, g = 1; double vertices[3][2] = { { 0 } }; double localW[3][3] = { { 0 } }; double localB[3] = { 1 }; FILE* meshFile = fopen(fileP, "r"); FILE* vertexFile = fopen(fileT, "r"); vertexSize = lineCount(vertexFile); meshSize = lineCount(meshFile); /* Read files: START */ double startIO = omp_get_wtime(); clock_t startIOCPU = clock(); double* meshPoints = readMatrixFile(meshFile, lineCount(meshFile), 2); double* vertexNumbers = readMatrixFile(vertexFile, lineCount(vertexFile), 3); double ioTime = omp_get_wtime() - startIO; double ioTimeCPU = ((double) (clock() - startIOCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Read files: END */ /* Prepare matrices: START */ double startZeros = omp_get_wtime(); clock_t startZerosCPU = clock(); double* w = zeros(meshSize * meshSize); double* b = zeros(meshSize); double zerosTime = omp_get_wtime() - startZeros; double zerosTimeCPU = ((double) (clock() - startZerosCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Prepare matrices: START */ /* Assembling: START */ double startComputation = omp_get_wtime(); clock_t startComputationCPU = clock(); #pragma omp parallel for private(tri, i, j, globalVertex, globalVertex2, vertices, localW, localB) schedule(dynamic) for (tri = 0; tri < vertexSize; tri++) { for (i = 0; i < 3; i++) { globalVertex = (int) *(vertexNumbers + tri * 3 + i) - 1; vertices[i][0] = *(meshPoints + globalVertex * 2 + 0); vertices[i][1] = *(meshPoints + globalVertex * 2 + 1); } localStiffnessMatrix(vertices, localW); localVector(vertices, f, localB); for (i = 0; i < 3; i++) { globalVertex = (int) *(vertexNumbers + tri * 3 + i) - 1; #pragma omp critical b[globalVertex] += localB[i]; for (j = 0; j < 3; j++) { globalVertex2 = (int) *(vertexNumbers + tri * 3 + j) - 1; #pragma omp critical *(w + globalVertex * meshSize + globalVertex2) += 
localW[i][j]; } } } double assemblingTime = omp_get_wtime() - startComputation; double assemblingTimeCPU = ((double) (clock() - startComputationCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Assembling: END */ /* Dirichlet condition: START */ double startDirichlet = omp_get_wtime(); clock_t startDirichletCPU = clock(); assignDirichletCondition(meshSize, g, meshPoints, w, b); double dirichletTime = omp_get_wtime() - startDirichlet; double dirichletTimeCPU = ((double) (clock() - startDirichletCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Dirichlet condition: END */ /* Gaussian Elimination: START */ double startGauss = omp_get_wtime(); clock_t startGaussCPU = clock(); gaussianElimination(meshSize, w, b); double gaussTime = omp_get_wtime() - startGauss; double gaussTimeCPU = ((double) (clock() - startGaussCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Gaussian Elimination: END */ /* Backward Substitution: START */ double startBack = omp_get_wtime(); clock_t startBackCPU = clock(); double* u = backSubstitution(meshSize, w, b); double backTime = omp_get_wtime() - startBack; double backTimeCPU = ((double) (clock() - startBackCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Backward Substitution: END */ double computationTime = omp_get_wtime() - startComputation; double computationTimeCPU = ((double) (clock() - startComputationCPU) / CLOCKS_PER_SEC) / omp_get_max_threads(); /* Computation: END */ printf("Vertex number: %d.\n", meshSize); printf("Elapsed time for computation: %lf seconds.\n", computationTime); printf("Elapsed time for computation in CPU: %lf seconds.\n", computationTimeCPU); printf("Elapsed time for IO: %lf seconds.\n", ioTime); printf("Elapsed time for IO in CPU: %lf seconds.\n", ioTimeCPU); printf("Elapsed time for zeros: %lf seconds.\n", zerosTime); printf("Elapsed time for zeros in CPU: %lf seconds.\n", zerosTimeCPU); printf("Elapsed time for assembling: %lf seconds.\n", assemblingTime); printf("Elapsed time for assembling in CPU: %lf 
seconds.\n", assemblingTimeCPU); printf("Elapsed time for dirichlet: %lf seconds.\n", dirichletTime); printf("Elapsed time for dirichlet in CPU: %lf seconds.\n", dirichletTimeCPU); printf("Elapsed time for gaussian elimination: %lf seconds.\n", gaussTime); printf("Elapsed time for gaussian elimination in CPU: %lf seconds.\n", gaussTimeCPU); printf("Elapsed time for back substitution: %lf seconds.\n", backTime); printf("Elapsed time for back substitution in CPU: %lf seconds.\n", backTimeCPU); fprintf(result, "%d; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf; %lf\n", meshSize, computationTime, ioTime, zerosTime, assemblingTime, dirichletTime, gaussTime, backTime, computationTimeCPU, ioTimeCPU, zerosTimeCPU, assemblingTimeCPU, dirichletTimeCPU, gaussTimeCPU, backTimeCPU); fclose(meshFile); fclose(vertexFile); free(meshPoints); free(vertexNumbers); free(w); free(b); free(u); return u; } int main(int argc, char **argv) { int i; char dirP[50] = "data/p"; char dirT[50] = "data/t"; char extension[50] = ".csv"; char fileP[100]; char fileT[100]; FILE* result; if(argc != 3){ printf("usage: ./pmain file_number parallel_flag\n"); return 1; } int files = atoi(argv[1]); if(strcmp(argv[2], "-p") == 0){ printf("Parallel option enabled...\n"); printf("Running with %d threads...\n", omp_get_max_threads()); result = fopen("resParallel.csv", "w"); omp_set_num_threads(omp_get_max_threads()); } else { omp_set_num_threads(1); result = fopen("resSerial.csv", "w"); } fprintf(result, "meshSize; computationTime; ioTime; zerosTime; assemblingTime; dirichletTime; gaussTime; backTime; computationTimeCPU; ioTimeCPU; zerosTimeCPU; assemblingTimeCPU; dirichletTimeCPU; gaussTimeCPU; backTimeCPU\n"); for (i = 0; i < files; i++) { sprintf(fileP, "%s%d%s", dirP, i, extension); sprintf(fileT, "%s%d%s", dirT, i, extension); printf("Iteration %d\n", i); solvePDE(fileP, fileT, result); } fclose(result); return 0; }
bucle-for-modificado.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Runs a parallel loop whose iteration count is taken from argv[1]; each
 * iteration prints which OpenMP thread executed it.
 *
 * Usage: ./program <num_iterations>
 * Exits with EXIT_FAILURE when the iteration count argument is missing.
 */
int main(int argc, char **argv)
{
  if (argc < 2) {
    fprintf(stderr,"\n[ERROR] - Falta no iteraciones \n");
    /* exit(-1) produced an implementation-defined status; use the portable macro */
    exit(EXIT_FAILURE);
  }

  /* Iteration count from the command line (previously dead-initialized to 9). */
  int n = atoi(argv[1]);

  /* Iterations are independent: each only prints its own index, so the loop
   * can be split across threads; the loop variable is implicitly private. */
  #pragma omp parallel for
  for (int i = 0; i < n; i++)
    printf("thread %d ejecuta la iteración %d del bucle\n", omp_get_thread_num(), i);

  return 0;
}
ddot_kahan_omp_avx_intrin.c
#ifdef __AVX__

#include <stdio.h>
#include <stdlib.h>   /* malloc/free/exit/EXIT_FAILURE were used without this header */
#include <math.h>
#include <omp.h>
#include "immintrin.h"

/* Serial AVX Kahan dot-product kernel (defined elsewhere): stores the sum
 * through the last argument and returns the compensation term. */
double ddot_kahan_avx_intrin(int, const double*, const double*, double*);

/*
 * Computes *r = dot(a, b) over N elements with Kahan-compensated summation.
 * The input is split into cache-line-sized chunks, one per OpenMP thread;
 * each thread runs the AVX kernel on its chunk, and the per-thread partial
 * sums are then combined with a scalar Kahan sum.
 */
void ddot_kahan_omp_avx_intrin(int N, const double *a, const double *b, double *r)
{
  double *sum;   /* per-thread partial sums */
  double *c;     /* per-thread compensation terms */
  int nthreads;

  #pragma omp parallel
  {
    #pragma omp single
    {
      nthreads = omp_get_num_threads();
      if ((sum = (double *)malloc(nthreads * sizeof(double))) == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
      }
      if ((c = (double *)malloc(nthreads * sizeof(double))) == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
      }
    }
  }

  /* Too little work to split across threads: run the serial kernel. */
  if (N < nthreads) {
    ddot_kahan_avx_intrin(N, a, b, r);
    free(sum);   /* both buffers were leaked on this path before */
    free(c);
    return;
  }

  #pragma omp parallel
  {
    int id = omp_get_thread_num();

    /* calculate each thread's chunk: a multiple of the 64-byte cache line */
    int alignment = 64 / sizeof(double);
    int gchunk = ((N/alignment)+(nthreads-1))/nthreads;
    gchunk = gchunk * alignment;
    int chunk = gchunk;
    if ((id+1)*chunk > N)
      chunk = N-(id*chunk);   /* last thread takes the (possibly short) remainder */
    if (chunk < 0)
      chunk = 0;              /* this thread has no elements at all */

    /* each thread sums part of the array */
    c[id] = ddot_kahan_avx_intrin(chunk, a+id*gchunk, b+id*gchunk, &sum[id]);
  }

  /* perform scalar Kahan sum of partial sums */
  double scalar_c = 0.0;     /* was 0.0f: float literal for a double accumulator */
  double scalar_sum = 0.0;
  #pragma novector
  for (int i = 0; i < nthreads; ++i) {
    scalar_c = scalar_c + c[i];
    double y = sum[i] - scalar_c;
    double t = scalar_sum + y;
    scalar_c = (t - scalar_sum) - y;
    scalar_sum = t;
  }

  *r = scalar_sum;

  free(sum);   /* both buffers were leaked on every call before */
  free(c);
}

#endif
NewTimer.h
////////////////////////////////////////////////////////////////// // (c) Copyright 2008- by Ken Esler ////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////// // Ken Esler // National Center for Supercomputing Applications & // University of Illinois, Urbana-Champaign // Urbana, IL 61801 // e-mail: jnkim@ncsa.uiuc.edu // // Supported by // National Center for Supercomputing Applications, UIUC // Materials Computation Center, UIUC ////////////////////////////////////////////////////////////////// // -*- C++ -*- /** @file NewTimer.h * @brief NewTimer class various high-resolution timers. */ #ifndef QMCPLUSPLUS_NEW_TIMER_H #define QMCPLUSPLUS_NEW_TIMER_H #include <Utilities/Clock.h> #include <vector> #include <string> #include <algorithm> class Communicate; namespace qmcplusplus { /* Timer using omp_get_wtime */ class NewTimer { protected: double start_time; double total_time; long num_calls; std::string name; public: #if defined(DISABLE_TIMER) inline void start() {} inline void stop() {} #else inline void start() { start_time = cpu_clock(); } inline void stop() { total_time += cpu_clock() - start_time; num_calls++; } #endif inline double get_total() const { return total_time; } inline long get_num_calls() const { return num_calls; } inline std::string get_name() const { return name; } inline void reset() { num_calls = 0; total_time=0.0; } NewTimer(const std::string& myname) : total_time(0.0), num_calls(0), name(myname) { } void set_name(const std::string& myname) { name=myname; } }; struct TimerComparator { inline bool operator()(const NewTimer *a, const NewTimer *b) { return a->get_name() < b->get_name(); } }; class TimerManagerClass { protected: std::vector<NewTimer*> TimerList; public: inline void addTimer (NewTimer* t) { #pragma omp critical { TimerList.push_back(t); } } void reset(); void print (Communicate* comm); }; extern TimerManagerClass TimerManager; } #endif
DRB065-pireduction-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Classic PI calculation using reduction */ #define num_steps 2000000000 #include <stdio.h> int main(int argc, char** argv) { double pi = 0.0; long int i; double x, interval_width; interval_width = 1.0/(double)num_steps; #pragma omp parallel for reduction(+:pi) private(i, x) for (i = 0; i < num_steps; i++) { x = (i+ 0.5) * interval_width; pi += 1.0 / (x*x + 1.0); } pi = pi * 4.0 * interval_width; printf ("PI=%f\n", pi); return 0; }
serial_tree_learner.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_ #include <LightGBM/dataset.h> #include <LightGBM/tree.h> #include <LightGBM/tree_learner.h> #include <LightGBM/utils/array_args.h> #include <LightGBM/utils/random.h> #include <string> #include <cmath> #include <cstdio> #include <memory> #include <random> #include <vector> #include "data_partition.hpp" #include "feature_histogram.hpp" #include "leaf_splits.hpp" #include "split_info.hpp" #ifdef USE_GPU // Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled. // This is necessary to pin the two arrays in memory and make transferring faster. #include <boost/align/aligned_allocator.hpp> #endif using namespace json11; namespace LightGBM { /*! \brief forward declaration */ class CostEfficientGradientBoosting; /*! 
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
 public:
  friend CostEfficientGradientBoosting;

  explicit SerialTreeLearner(const Config* config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  void ResetTrainingData(const Dataset* train_data) override;

  void ResetConfig(const Config* config) override;

  // Learns one tree from the given gradients/hessians; forced_split_json may
  // prescribe splits to apply first (see ForceSplits below).
  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian, const Json& forced_split_json) override;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients, const score_t* hessians) override;

  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  // Adds each leaf's output to the score of every data point partitioned into
  // that leaf. Leaves hold disjoint data indices, so the per-leaf parallel
  // loop writes to disjoint ranges of out_score.
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;

 protected:
  // Samples the feature subset used for this tree (or this split level).
  virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();
  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);

  virtual void FindBestSplits();

  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /* Force splits with forced_split_json dict and then return num splits forced.*/
  virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf, int* right_leaf,
                              int* cur_depth, bool *aborted_last_force_split);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<int8_t> is_feature_used_;
  /*! \brief used feature indices in current tree */
  std::vector<int> used_feature_indices_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
#endif
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  int num_threads_;
  std::vector<int> ordered_bin_indices_;
  bool is_constant_hessian_;
  std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};

// Negative leaf_idx denotes "no leaf" and reports zero data points.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  if (leaf_idx >= 0) {
    return data_partition_->leaf_count(leaf_idx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
GB_unaryop__lnot_fp32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_fp32_int64
// op(A') function: GB_tran__lnot_fp32_int64

// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT applied to the already-cast value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type (int64_t) to the C type (float)
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_fp32_int64
(
    float *Cx,          // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so the loop parallelizes with a static schedule
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_fp32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body lives in the shared template, driven by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
LevelElimination.h
/*
 * LevelElimination.h
 *
 *  Created on: 10.01.2015
 *      Author: Michael
 */

#ifndef LEVELELIMINATION_H_
#define LEVELELIMINATION_H_

#include "Level.h"
#include "EliminationStage.h"

namespace NetworKit {

/**
 * @ingroup numerics
 * Multigrid level that coarsens by eliminating fine variables through a
 * sequence of elimination stages, rather than by aggregation.
 */
template<class Matrix>
class LevelElimination : public Level<Matrix> {
private:
	// Elimination stages, applied in order during restriction.
	std::vector<EliminationStage<Matrix>> coarseningStages;
	// For each coarse index, the corresponding index on the finest level.
	std::vector<index> cIndexFine;

	// Gathers vector[elements[i]] into subVector[i] for all i.
	void subVectorExtract(Vector& subVector, const Vector& vector, const std::vector<index>& elements) const;

public:
	LevelElimination(const Matrix& A, const std::vector<EliminationStage<Matrix>>& coarseningStages);

	// Projects a fine-level vector xf onto the coarse level (xc).
	void coarseType(const Vector& xf, Vector& xc) const;

	// Restricts the fine right-hand side bf to the coarse bc, recording the
	// intermediate right-hand sides of every stage in bStages.
	void restrict(const Vector& bf, Vector& bc, std::vector<Vector>& bStages) const;

	// Interpolates the coarse solution xc back to the fine level xf, using the
	// stage right-hand sides recorded during restrict().
	void interpolate(const Vector& xc, Vector& xf, const std::vector<Vector>& bStages) const;
};

template<class Matrix>
LevelElimination<Matrix>::LevelElimination(const Matrix& A, const std::vector<EliminationStage<Matrix>>& coarseningStages) : Level<Matrix>(LevelType::ELIMINATION, A), coarseningStages(coarseningStages) {
	cIndexFine = std::vector<index>(this->A.numberOfRows());
#pragma omp parallel for
	for (omp_index i = 0; i < static_cast<omp_index>(cIndexFine.size()); ++i) {
		cIndexFine[i] = i;
	}

	// Walk the stages backwards, composing the C-set maps so that cIndexFine
	// ends up mapping each coarsest index to its original fine-level index.
	for (index k = coarseningStages.size(); k-- > 0;) {
		for (index i = 0; i < cIndexFine.size(); ++i) {
			assert(cIndexFine[i] < coarseningStages[k].getCSet().size());
			cIndexFine[i] = coarseningStages[k].getCSet()[cIndexFine[i]];
		}
	}
}

template<class Matrix>
void LevelElimination<Matrix>::coarseType(const Vector& xf, Vector& xc) const {
	xc = Vector(this->A.numberOfRows());
	// Independent gathers: each coarse entry reads one fine entry.
#pragma omp parallel for
	for (omp_index i = 0; i < static_cast<omp_index>(xc.getDimension()); ++i) {
		xc[i] = xf[cIndexFine[i]];
	}
}

template<class Matrix>
void LevelElimination<Matrix>::restrict(const Vector& bf, Vector& bc, std::vector<Vector>& bStages) const {
	bStages.resize(coarseningStages.size() + 1);
	bStages[0] = bf;
	bc = bf;
	index curStage = 0;

	// Each stage folds the F-set part of the right-hand side into the C-set
	// part via the stage's restriction operator R.
	for (const EliminationStage<Matrix>& s : coarseningStages) {
		//Vector bOld = bStages[curStage];
		Vector bCSet;
		subVectorExtract(bCSet, bc, s.getCSet());
		Vector bFSet;
		subVectorExtract(bFSet, bc, s.getFSet());

		bc = bCSet + s.getR() * bFSet;
		bStages[curStage+1] = bc; // b = b.c + s.P^T * b.f
		curStage++;
	}
}

template<class Matrix>
void LevelElimination<Matrix>::interpolate(const Vector& xc, Vector& xf, const std::vector<Vector>& bStages) const {
	Vector currX = xc;

	// Undo the stages in reverse: recover the F-set values of each stage from
	// the stage right-hand side and the interpolation operator P, then scatter
	// F-set and C-set values back into the finer vector.
	for (index k = coarseningStages.size(); k-- > 0;) {
		const EliminationStage<Matrix>& s = coarseningStages[k];
		xf = Vector(s.getN());
		Vector bFSet;
		subVectorExtract(bFSet, bStages[k], s.getFSet());

		Vector bq(bFSet.getDimension());
		const Vector &q = s.getQ();
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(bq.getDimension()); ++i) { // bq = s.q .* b.f
			bq[i] = q[i] * bFSet[i];
		}

		Vector xFSet = s.getP() * currX + bq;

		const std::vector<index> &fSet = s.getFSet();
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(xFSet.getDimension()); ++i) {
			xf[fSet[i]] = xFSet[i];
		}

		const std::vector<index> &cSet = s.getCSet();
#pragma omp parallel for
		for (omp_index i = 0; i < static_cast<omp_index>(currX.getDimension()); ++i) {
			xf[cSet[i]] = currX[i];
		}

		currX = xf;
	}
}

template<class Matrix>
void LevelElimination<Matrix>::subVectorExtract(Vector& subVector, const Vector& vector, const std::vector<index>& elements) const {
	subVector = Vector(elements.size());
#pragma omp parallel for
	for (omp_index i = 0; i < static_cast<omp_index>(elements.size()); ++i) {
		subVector[i] = vector[elements[i]];
	}
}

} /* namespace NetworKit */

#endif /* LEVELELIMINATION_H_ */
InfomapGreedyCommon.h
/**********************************************************************************

 Infomap software package for multi-level network clustering

 Copyright (c) 2013, 2014 Daniel Edler, Martin Rosvall

 For more information, see <http://www.mapequation.org>

 This file is part of Infomap software package.

 Infomap software package is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 Infomap software package is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU Affero General Public License for more details.

 You should have received a copy of the GNU Affero General Public License
 along with Infomap software package. If not, see <http://www.gnu.org/licenses/>.

**********************************************************************************/

#ifndef INFOMAPGREEDYCOMMON_H_
#define INFOMAPGREEDYCOMMON_H_

#include "InfomapGreedySpecialized.h"
#include <memory>
#ifdef _OPENMP
#include <omp.h>
#include <stdio.h>
#endif
#include <limits>

#ifdef NS_INFOMAP
namespace infomap
{
#endif

// Strict weak ordering on node pairs: by first node's index, ties broken by
// the second node's index.
struct CompNodePair
{
	bool operator() (const std::pair<NodeBase*, NodeBase*>& first, const std::pair<NodeBase*, NodeBase*>& second) const
	{
		return first.first->index == second.first->index ?
				(first.second->index < second.second->index) :
				(first.first->index < second.first->index);
	}
};

// CRTP base holding the greedy optimization logic shared by the specialized
// Infomap variants; the derived type supplies flow/node/delta-flow types.
template<typename InfomapGreedyDerivedType>
class InfomapGreedyCommon : public InfomapGreedySpecialized<typename derived_traits<InfomapGreedyDerivedType>::flow_type>
{
	typedef typename derived_traits<InfomapGreedyDerivedType>::flow_type FlowType;
	typedef typename derived_traits<InfomapGreedyDerivedType>::node_type NodeType;
	typedef typename derived_traits<InfomapGreedyDerivedType>::deltaflow_type DeltaFlowType;
	typedef InfomapGreedySpecialized<FlowType> Super;
	typedef typename flowData_traits<FlowType>::detailed_balance_type DetailedBalanceType;
	typedef typename flowData_traits<FlowType>::directed_with_recorded_teleportation_type DirectedWithRecordedTeleportationType;
	typedef typename flowData_traits<FlowType>::teleportation_type TeleportationType;
	typedef typename flowData_traits<FlowType>::is_directed_type IsDirectedType;
	typedef Edge<NodeBase> EdgeType;

public:
	InfomapGreedyCommon(const Config& conf, NodeFactoryBase* nodeFactory) : InfomapGreedySpecialized<FlowType>(conf, nodeFactory), m_coreLoopCount(0) {}
	InfomapGreedyCommon(const InfomapBase& infomap, NodeFactoryBase* nodeFactory) : InfomapGreedySpecialized<FlowType>(infomap, nodeFactory), m_coreLoopCount(0) {}
	virtual ~InfomapGreedyCommon() {}

private:
	InfomapGreedyDerivedType& derived();

protected:
	using Super::root;

	virtual std::auto_ptr<InfomapBase> getNewInfomapInstance();

	NodeType& getNode(NodeBase& node);
	const NodeType& getNode(const NodeBase& node) const;

	virtual unsigned int aggregateFlowValuesFromLeafToRoot();

	virtual double calcCodelengthOnAllNodesInTree();
	virtual double calcCodelengthOnRootOfLeafNodes(const NodeBase& parent);
	virtual double calcCodelengthOnModuleOfModules(const NodeBase& parent);
	virtual double calcCodelengthOnModuleOfLeafNodes(const NodeBase& parent);
	virtual std::pair<double, double> calcCodelength(const NodeBase& parent);

	virtual void initConstantInfomapTerms();

	virtual void calculateCodelengthFromActiveNetwork();

	virtual unsigned int optimizeModules();
	virtual unsigned int optimizeModulesCrude();

	unsigned int tryMoveEachNodeIntoBestModule();
	unsigned int tryMoveEachNodeIntoBestModuleParallelizable();
	unsigned int tryMoveEachNodeIntoBestModuleInParallel();
	unsigned int tryMoveEachNodeIntoStrongestConnectedModule();

	virtual void moveNodesToPredefinedModules();

	virtual unsigned int consolidateModules(bool replaceExistingStructure, bool asSubModules);

	unsigned int m_coreLoopCount;

	using Super::m_treeData;
	using Super::m_config;
};

template<typename InfomapGreedyDerivedType>
inline InfomapGreedyDerivedType& InfomapGreedyCommon<InfomapGreedyDerivedType>::derived()
{
	return static_cast<InfomapGreedyDerivedType&>(*this);
}

template<typename InfomapGreedyDerivedType>
inline std::auto_ptr<InfomapBase> InfomapGreedyCommon<InfomapGreedyDerivedType>::getNewInfomapInstance()
{
	return std::auto_ptr<InfomapBase>(new InfomapGreedyDerivedType(*this));
}

template<typename InfomapGreedyDerivedType>
inline typename InfomapGreedyCommon<InfomapGreedyDerivedType>::NodeType& InfomapGreedyCommon<InfomapGreedyDerivedType>::getNode(NodeBase& node)
{
	return static_cast<NodeType&>(node);
}

template<typename InfomapGreedyDerivedType>
inline const typename InfomapGreedyCommon<InfomapGreedyDerivedType>::NodeType& InfomapGreedyCommon<InfomapGreedyDerivedType>::getNode(const NodeBase& node) const
{
	return static_cast<const NodeType&>(node);
}

// Sums node flow bottom-up into every module, then re-derives enter/exit flow
// on modules by walking every leaf edge up to the common ancestor.
// Returns the number of levels in the tree.
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::aggregateFlowValuesFromLeafToRoot()
{
	FlowType& rootData = getNode(*root()).data;
	rootData = FlowType(0.0, 0.0);
	unsigned int numLevels = 0;

	// Aggregate flow from leaf nodes to root node
	for (NodeBase::post_depth_first_iterator it(root()); !it.isEnd(); ++it)
	{
		NodeType& node = getNode(*it);
		if (!node.isRoot())
			getNode(*node.parent).data += node.data;
		// Don't aggregate enter and exit flow
		if (!node.isLeaf())
		{
			node.originalIndex = it.depth(); // Use originalIndex to store the depth on modules
			node.data.exitFlow = 0.0;
			node.data.enterFlow = 0.0;
		}
		else
			numLevels = std::max(numLevels, it.depth());
	}

	if (std::abs(rootData.flow - 1.0) > 1e-10)
		Log() << "Warning, aggregated flow is not exactly 1.0, but " << rootData.flow << ".\n";

	// Aggregate enter and exit flow between modules
	for (TreeData::leafIterator leafIt(m_treeData.begin_leaf()); leafIt != m_treeData.end_leaf(); ++leafIt)
	{
		NodeBase& leafNodeSource = **leafIt;
		for (NodeBase::edge_iterator edgeIt(leafNodeSource.begin_outEdge()), endIt(leafNodeSource.end_outEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			NodeBase& leafNodeTarget = edge.target;
			double linkFlow = edge.data.flow;

			NodeBase* node1 = leafNodeSource.parent;
			NodeBase* node2 = leafNodeTarget.parent;

			if (node1 == node2)
				continue;

			// First aggregate link flow until equal depth
			while (node1->originalIndex > node2->originalIndex)
			{
				getNode(*node1).data.exitFlow += linkFlow;
				node1 = node1->parent;
			}
			while (node2->originalIndex > node1->originalIndex)
			{
				getNode(*node2).data.enterFlow += linkFlow;
				node2 = node2->parent;
			}

			// Then aggregate link flow until equal parent
			while (node1 != node2)
			{
				getNode(*node1).data.exitFlow += linkFlow;
				getNode(*node2).data.enterFlow += linkFlow;
				node1 = node1->parent;
				node2 = node2->parent;
			}
		}
	}

	Super::addTeleportationFlowOnModules();

	return numLevels;
}

template<typename InfomapGreedyDerivedType>
inline double InfomapGreedyCommon<InfomapGreedyDerivedType>::calcCodelengthOnAllNodesInTree()
{
	double sumCodelength = 0.0;
	for (NodeBase::pre_depth_first_iterator it(root()); !it.isEnd(); ++it)
	{
		NodeBase& node = *it;
		if (node.isLeaf())
			node.codelength = 0.0;
		else if (node.isLeafModule())
			node.codelength = calcCodelengthOnModuleOfLeafNodes(node);
		else
			node.codelength = calcCodelengthOnModuleOfModules(node);
		sumCodelength += node.codelength;
	}
	return sumCodelength;
}

template<typename InfomapGreedyDerivedType>
inline double InfomapGreedyCommon<InfomapGreedyDerivedType>::calcCodelengthOnRootOfLeafNodes(const NodeBase& parent)
{
	return calcCodelengthOnModuleOfLeafNodes(parent);
}

template<typename InfomapGreedyDerivedType>
inline double InfomapGreedyCommon<InfomapGreedyDerivedType>::calcCodelengthOnModuleOfLeafNodes(const NodeBase& parent)
{
	const FlowType& parentData = getNode(parent).data;
	double parentFlow = parentData.flow;
	double parentExit = parentData.exitFlow;
	double totalParentFlow = parentFlow + parentExit;
	if (totalParentFlow < 1e-16)
		return 0.0;

	double indexLength = 0.0;
	// For each child
	for (NodeBase::const_sibling_iterator childIt(parent.begin_child()), endIt(parent.end_child()); childIt != endIt; ++childIt)
	{
		indexLength -= infomath::plogp(getNode(*childIt).data.flow / totalParentFlow);
	}
	indexLength -= infomath::plogp(parentExit / totalParentFlow);

	indexLength *= totalParentFlow;

	return indexLength;
}

template<typename InfomapGreedyDerivedType>
inline double InfomapGreedyCommon<InfomapGreedyDerivedType>::calcCodelengthOnModuleOfModules(const NodeBase& parent)
{
	const FlowType& parentData = getNode(parent).data;
	double parentExit = parentData.exitFlow;
	if (parentData.flow < 1e-16)
		return 0.0;

	// H(x) = -xlog(x), T = q + SUM(p), q = exitFlow, p = enterFlow
	// Normal format
	// L = q * -log(q/T) + p * SUM(-log(p/T))
	// Compact format
	// L = T * ( H(q/T) + SUM( H(p/T) ) )
	// Expanded format
	// L = q * -log(q) - q * -log(T) + SUM( p * -log(p) - p * -log(T) ) = T * log(T) - q*log(q) - SUM( p*log(p) )
	// As T is not known, use expanded format to avoid two loops
	double sumEnter = 0.0;
	double sumEnterLogEnter = 0.0;
	for (NodeBase::const_sibling_iterator childIt(parent.begin_child()), endIt(parent.end_child()); childIt != endIt; ++childIt)
	{
		const double& enterFlow = getNode(*childIt).data.enterFlow; // rate of enter to finer level
		sumEnter += enterFlow;
		sumEnterLogEnter += infomath::plogp(enterFlow);
	}
	// The possibilities from this module: Either exit to coarser level or enter one of its children
	double totalCodewordUse = parentExit + sumEnter;

	return infomath::plogp(totalCodewordUse)
			- sumEnterLogEnter
			- infomath::plogp(parentExit);
}

template<typename InfomapGreedyDerivedType>
inline std::pair<double, double> InfomapGreedyCommon<InfomapGreedyDerivedType>::calcCodelength(const NodeBase& parent)
{
	double indexCodelength = calcCodelengthOnModuleOfModules(parent);

	double moduleCodelength = 0.0;
	for (NodeBase::const_sibling_iterator childIt(parent.begin_child()), endIt(parent.end_child()); childIt != endIt; ++childIt)
	{
		moduleCodelength += calcCodelengthOnModuleOfLeafNodes(*childIt);
	}

	return std::make_pair(indexCodelength, moduleCodelength);
}

template<typename InfomapGreedyDerivedType>
void InfomapGreedyCommon<InfomapGreedyDerivedType>::initConstantInfomapTerms()
{
	// Not constant for memory Infomap!
	Super::nodeFlow_log_nodeFlow = 0.0;
	// For each module
	for (typename Super::activeNetwork_iterator it(Super::m_activeNetwork.begin()), itEnd(Super::m_activeNetwork.end()); it != itEnd; ++it)
	{
		NodeType& node = getNode(**it);
		Super::nodeFlow_log_nodeFlow += infomath::plogp(node.data.flow);
	}
}

/**
 * Specialized for the case when enter and exit flow may differ
 */
template<typename InfomapGreedyDerivedType>
void InfomapGreedyCommon<InfomapGreedyDerivedType>::calculateCodelengthFromActiveNetwork()
{
	Super::enter_log_enter = 0.0;
	Super::flow_log_flow = 0.0;
	Super::exit_log_exit = 0.0;
	Super::enterFlow = 0.0;

	// For each module
	for (typename Super::activeNetwork_iterator it(Super::m_activeNetwork.begin()), itEnd(Super::m_activeNetwork.end()); it != itEnd; ++it)
	{
		NodeType& node = getNode(**it);
		// own node/module codebook
		Super::flow_log_flow += infomath::plogp(node.data.flow + node.data.exitFlow);

		// use of index codebook
		Super::enter_log_enter += infomath::plogp(node.data.enterFlow);
		Super::exit_log_exit += infomath::plogp(node.data.exitFlow);
		Super::enterFlow += node.data.enterFlow;
	}
	Super::enterFlow += Super::exitNetworkFlow;
	Super::enterFlow_log_enterFlow = infomath::plogp(Super::enterFlow);

	derived().calculateNodeFlow_log_nodeFlowForMemoryNetwork();

	Super::indexCodelength = Super::enterFlow_log_enterFlow - Super::enter_log_enter - Super::exitNetworkFlow_log_exitNetworkFlow;
	Super::moduleCodelength = -Super::exit_log_exit + Super::flow_log_flow - Super::nodeFlow_log_nodeFlow;
	Super::codelength = Super::indexCodelength + Super::moduleCodelength;
}

// Repeats the node-move sweep until the loop limit is hit or the codelength
// improvement falls below the configured minimum. Returns the loop count.
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::optimizeModules()
{
	DEBUG_OUT("\nInfomapGreedyCommon<InfomapGreedyDerivedType>::optimizeModules()");
	m_coreLoopCount = 0;
	double oldCodelength = Super::codelength;
	unsigned int loopLimit = Super::m_config.coreLoopLimit;
	unsigned int minRandLoop = 2;
	if (loopLimit >= minRandLoop && Super::m_config.randomizeCoreLoopLimit)
		loopLimit = static_cast<unsigned int>(Super::m_rand() * (loopLimit - minRandLoop)) + minRandLoop;
	unsigned int loopLimitOnAggregationLevels = 20;

	// Iterate while the optimization loop moves some nodes within the dynamic modular structure
	do
	{
		oldCodelength = Super::codelength;
		if (Super::m_config.innerParallelization)
			tryMoveEachNodeIntoBestModuleInParallel(); // returns numNodesMoved
		else
			tryMoveEachNodeIntoBestModule(); // returns numNodesMoved
		++m_coreLoopCount;
	}
	while (m_coreLoopCount != (Super::m_aggregationLevel == 0 && !Super::m_isCoarseTune ? loopLimit : loopLimitOnAggregationLevels) &&
			Super::codelength < oldCodelength - Super::m_config.minimumCodelengthImprovement);

	return m_coreLoopCount;
}

// Cruder variant: moves nodes to their strongest connected module and stops
// as soon as a sweep moves nothing (or the loop limit is reached).
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::optimizeModulesCrude()
{
	m_coreLoopCount = 0;
//	double oldCodelength = Super::codelength;
	unsigned int loopLimit = Super::m_config.coreLoopLimit;
	unsigned int minRandLoop = 3;
	if (loopLimit >= minRandLoop && Super::m_config.randomizeCoreLoopLimit)
		loopLimit = static_cast<unsigned int>(Super::m_rand() * (loopLimit - minRandLoop)) + minRandLoop;
//	unsigned int loopLimitOnAggregationLevels = -1;
	unsigned int numMoved = 0;

	// Iterate while the optimization loop moves some nodes within the dynamic modular structure
	do
	{
//		oldCodelength = Super::codelength;
		numMoved = tryMoveEachNodeIntoStrongestConnectedModule(); // returns numNodesMoved
		++m_coreLoopCount;
	}
	while (m_coreLoopCount != loopLimit && numMoved > 0);

	return m_coreLoopCount;
}

/**
 * Minimize the codelength by trying to move each node into best module.
 *
 * For each node:
 * 1. Calculate the change in codelength for a move to each of its neighbouring modules or to an empty module
 * 2. Move to the one that reduces the codelength the most, if any.
 *
 * @return The number of nodes moved.
 */
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::tryMoveEachNodeIntoBestModule()
{
	unsigned int numNodes = Super::m_activeNetwork.size();
	// Get random enumeration of nodes
	std::vector<unsigned int> randomOrder(numNodes);
	infomath::getRandomizedIndexVector(randomOrder, Super::m_rand);

	// Scratch buffers reused for every node: moduleDeltaEnterExit holds one entry per
	// candidate destination module; redirect maps module index -> slot in that buffer.
	// A redirect entry is only valid if >= offset, so bumping 'offset' per node
	// invalidates all entries at once instead of clearing the whole vector.
	std::vector<DeltaFlowType> moduleDeltaEnterExit(numNodes);
	std::vector<unsigned int> redirect(numNodes, 0);
	unsigned int offset = 1;
	unsigned int maxOffset = std::numeric_limits<unsigned int>::max() - 1 - numNodes;

	unsigned int numMoved = 0;
	for (unsigned int i = 0; i < numNodes; ++i)
	{
		// Reset offset before overflow
		if (offset > maxOffset)
		{
			redirect.assign(numNodes, 0);
			offset = 1;
		}

		// Pick nodes in random order
		unsigned int flip = randomOrder[i];
		NodeType& current = getNode(*Super::m_activeNetwork[flip]);

		if (!current.dirty)
			continue;

		// Don't move out from previous merge on first loop
		if (Super::m_moduleMembers[current.index] > 1 && Super::isFirstLoop() && m_config.tuneIterationLimit != 1)
			continue;

		// Don't decrease the number of modules if already equal the preferred number
		if (Super::isTopLevel() && Super::numActiveModules() == m_config.preferredNumberOfModules && Super::m_moduleMembers[current.index] == 1)
			continue;

		// If no links connecting this node with other nodes, it won't move into others,
		// and others won't move into this. TODO: Always best leave it alone?
//		if (current.degree() == 0)
//		if (current.degree() == 0 ||
//			(Super::m_config.includeSelfLinks &&
//			(current.outDegree() == 1 && current.inDegree() == 1) &&
//			(**current.begin_outEdge()).target == current))
//		{
//			DEBUG_OUT("SKIPPING isolated node " << current << "\n");
//			//TODO: If not skipping self-links, this yields different results from moveNodesToPredefinedModules!!
//			ASSERT(!m_config.includeSelfLinks);
//			current.dirty = false;
//			continue;
//		}

		// Create vector with module links
		unsigned int numModuleLinks = 0;
		if (current.isDangling())
		{
			redirect[current.index] = offset + numModuleLinks;
			moduleDeltaEnterExit[numModuleLinks] = DeltaFlowType(current.index, 0.0, 0.0);
			++numModuleLinks;
		}
		else
		{
			// For all outlinks
			for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
			{
				EdgeType& edge = **edgeIt;
				if (edge.isSelfPointing())
					continue;
				NodeType& neighbour = getNode(edge.target);
				if (redirect[neighbour.index] >= offset)
				{
					moduleDeltaEnterExit[redirect[neighbour.index] - offset].deltaExit += edge.data.flow;
				}
				else
				{
					redirect[neighbour.index] = offset + numModuleLinks;
					moduleDeltaEnterExit[numModuleLinks] = DeltaFlowType(neighbour.index, edge.data.flow, 0.0);
					++numModuleLinks;
				}
			}
		}

		// For all inlinks
		// (note: in-links are scanned even for dangling nodes; only the out-link scan is in the else-branch above)
		for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.isSelfPointing())
				continue;
			NodeType& neighbour = getNode(edge.source);
			if (redirect[neighbour.index] >= offset)
			{
				moduleDeltaEnterExit[redirect[neighbour.index] - offset].deltaEnter += edge.data.flow;
			}
			else
			{
				redirect[neighbour.index] = offset + numModuleLinks;
				moduleDeltaEnterExit[numModuleLinks] = DeltaFlowType(neighbour.index, 0.0, edge.data.flow);
				++numModuleLinks;
			}
		}

		// If alone in the module, add virtual link to the module (used when adding teleportation)
		if (redirect[current.index] < offset)
		{
			redirect[current.index] = offset + numModuleLinks;
			moduleDeltaEnterExit[numModuleLinks] = DeltaFlowType(current.index, 0.0, 0.0);
			++numModuleLinks;
		}

		// Empty function if no teleportation coding model
		Super::template addTeleportationDeltaFlowIfMove<DeltaFlowType>(current, moduleDeltaEnterExit, numModuleLinks);

		// Option to move to empty module (if node not already alone, and not already at the preferred number of modules)
		if (Super::m_moduleMembers[current.index] > 1 && Super::m_emptyModules.size() > 0 && (m_config.preferredNumberOfModules == 0 || (Super::isTopLevel() && Super::numActiveModules() != m_config.preferredNumberOfModules)))
		{
			moduleDeltaEnterExit[numModuleLinks] = DeltaFlowType(Super::m_emptyModules.back(), 0.0, 0.0);
			++numModuleLinks;
		}

		// Store the DeltaFlow of the current module
		DeltaFlowType oldModuleDelta(moduleDeltaEnterExit[redirect[current.index] - offset]);

		// For memory networks
		derived().addContributionOfMovingMemoryNodes(current, oldModuleDelta, moduleDeltaEnterExit, redirect, offset, numModuleLinks);

		// Randomize link order for optimized search
		for (unsigned int j = 0; j < numModuleLinks - 1; ++j)
		{
			unsigned int randPos = j + Super::m_rand.randInt(numModuleLinks - j - 1);
			swap(moduleDeltaEnterExit[j], moduleDeltaEnterExit[randPos]);
		}

		DeltaFlowType bestDeltaModule(oldModuleDelta);
		double bestDeltaCodelength = 0.0;
		DeltaFlowType strongestConnectedModule(oldModuleDelta);
		double deltaCodelengthOnStrongestConnectedModule = 0.0;

		// Find the move that minimizes the description length
		for (unsigned int j = 0; j < numModuleLinks; ++j)
		{
			unsigned int otherModule = moduleDeltaEnterExit[j].module;
			if(otherModule != current.index)
			{
				double deltaCodelength = Super::getDeltaCodelengthOnMovingNode(current, oldModuleDelta, moduleDeltaEnterExit[j]);
				deltaCodelength += derived().getDeltaCodelengthOnMovingMemoryNode(oldModuleDelta, moduleDeltaEnterExit[j]);
				if (deltaCodelength < bestDeltaCodelength - Super::m_config.minimumSingleNodeCodelengthImprovement)
				{
					bestDeltaModule = moduleDeltaEnterExit[j];
					bestDeltaCodelength = deltaCodelength;
				}
				// Save strongest connected module to prefer if codelength improvement equal
				if (moduleDeltaEnterExit[j].deltaExit > strongestConnectedModule.deltaExit)
				{
					strongestConnectedModule = moduleDeltaEnterExit[j];
					deltaCodelengthOnStrongestConnectedModule = deltaCodelength;
				}
			}
		}

		// Prefer strongest connected module if equal delta codelength
		if (strongestConnectedModule.module != bestDeltaModule.module && deltaCodelengthOnStrongestConnectedModule <= bestDeltaCodelength + Super::m_config.minimumCodelengthImprovement)
		{
			bestDeltaModule = strongestConnectedModule;
		}

		// Make best possible move
		if(bestDeltaModule.module != current.index)
		{
			unsigned int bestModuleIndex = bestDeltaModule.module;
			//Update empty module vector
			if(Super::m_moduleMembers[bestModuleIndex] == 0)
			{
				Super::m_emptyModules.pop_back();
			}
			if(Super::m_moduleMembers[current.index] == 1)
			{
				Super::m_emptyModules.push_back(current.index);
			}

			Super::updateCodelengthOnMovingNode(current, oldModuleDelta, bestDeltaModule);
			derived().updateCodelengthOnMovingMemoryNode(oldModuleDelta, bestDeltaModule);

			Super::m_moduleMembers[current.index] -= 1;
			Super::m_moduleMembers[bestModuleIndex] += 1;

			unsigned int oldModuleIndex = current.index;
			current.index = bestModuleIndex;

			// Update physical node map on move for memory networks
			derived().performMoveOfMemoryNode(current, oldModuleIndex, bestModuleIndex);

			++numMoved;

			// Mark neighbours as dirty
			for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
				(*edgeIt)->target.dirty = true;
			for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
				(*edgeIt)->source.dirty = true;
		}
		else
			current.dirty = false;

		offset += numNodes;
	}

	return numMoved;
}

/**
 * Minimize the codelength by trying to move each node into best module.
 *
 * For each node:
 * 1. Calculate the change in codelength for a move to each of its neighbouring modules or to an empty module
 * 2. Move to the one that reduces the codelength the most, if any.
 *
 * @return The number of nodes moved.
 */
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::tryMoveEachNodeIntoBestModuleParallelizable()
{
	// Same greedy single-node move as tryMoveEachNodeIntoBestModule(), but the
	// per-node candidate deltas are collected in a local std::map instead of the
	// shared redirect/offset scratch buffers, so each iteration is self-contained.
	unsigned int numNodes = Super::m_activeNetwork.size();
	// Get random enumeration of nodes
	std::vector<unsigned int> randomOrder(numNodes);
	infomath::getRandomizedIndexVector(randomOrder, Super::m_rand);

	unsigned int numMoved = 0;
	int numNodesInt = static_cast<int>(numNodes);
	for (int i = 0; i < numNodesInt; ++i)
	{
		// Pick nodes in random order
		unsigned int flip = randomOrder[i];
		NodeType& current = getNode(*Super::m_activeNetwork[flip]);

		if (!current.dirty)
			continue;

		// If other nodes have moved here, don't move away on first loop
		if (Super::m_moduleMembers[current.index] > 1 && Super::isFirstLoop() && m_config.tuneIterationLimit != 1)
			continue;

		// If no links connecting this node with other nodes, it won't move into others,
		// and others won't move into this. TODO: Always best leave it alone?
//		if (current.degree() == 0)
		if (current.degree() == 0 ||
			(Super::m_config.includeSelfLinks &&
			(current.outDegree() == 1 && current.inDegree() == 1) &&
			(**current.begin_outEdge()).target == current))
		{
			DEBUG_OUT("SKIPPING isolated node " << current << "\n");
			//TODO: If not skipping self-links, this yields different results from moveNodesToPredefinedModules!!
			ASSERT(!m_config.includeSelfLinks);
			current.dirty = false;
			continue;
		}

		// Create map with module links
		std::map<unsigned int, DeltaFlowType> deltaFlow;
		// If alone in the module, add virtual link to the module (used when adding teleportation)
		deltaFlow[current.index] += DeltaFlowType(current.index, 0.0, 0.0);

		// For all outlinks
		for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.isSelfPointing())
				continue;
			NodeType& neighbour = getNode(edge.target);
			deltaFlow[neighbour.index] += DeltaFlowType(neighbour.index, edge.data.flow, 0.0);
		}
		// For all inlinks
		for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.isSelfPointing())
				continue;
			NodeType& neighbour = getNode(edge.source);
			deltaFlow[neighbour.index] += DeltaFlowType(neighbour.index, 0.0, edge.data.flow);
		}

		// Empty function if no teleportation coding model
		Super::template addTeleportationDeltaFlowIfMove<DeltaFlowType>(current, deltaFlow);

		// Option to move to empty module (if node not already alone)
		if (Super::m_moduleMembers[current.index] > 1 && Super::m_emptyModules.size() > 0)
			deltaFlow[Super::m_emptyModules.back()] += DeltaFlowType(Super::m_emptyModules.back(), 0.0, 0.0);

		// Store the DeltaFlow of the current module
		DeltaFlowType oldModuleDelta(deltaFlow[current.index]);

		// For memory networks
		derived().addContributionOfMovingMemoryNodes(current, oldModuleDelta, deltaFlow);

		// Flatten the map into a vector so the candidate order can be shuffled below
		std::vector<DeltaFlowType> moduleDeltaEnterExit(deltaFlow.size());
		unsigned int numModuleLinks = 0;
		for (typename std::map<unsigned int, DeltaFlowType>::iterator it(deltaFlow.begin()); it != deltaFlow.end(); ++it)
		{
			moduleDeltaEnterExit[numModuleLinks] = it->second;
			++numModuleLinks;
		}

		// Randomize link order for optimized search
		for (unsigned int j = 0; j < numModuleLinks - 1; ++j)
		{
			unsigned int randPos = j + Super::m_rand.randInt(numModuleLinks - j - 1);
			swap(moduleDeltaEnterExit[j], moduleDeltaEnterExit[randPos]);
		}

		DeltaFlowType bestDeltaModule(oldModuleDelta);
		double bestDeltaCodelength = 0.0;
		DeltaFlowType strongestConnectedModule(oldModuleDelta);
		double deltaCodelengthOnStrongestConnectedModule = 0.0;

		// Find the move that minimizes the description length
		for (unsigned int j = 0; j < numModuleLinks; ++j)
		{
			unsigned int otherModule = moduleDeltaEnterExit[j].module;
			if(otherModule != current.index)
			{
				double deltaCodelength = Super::getDeltaCodelengthOnMovingNode(current, oldModuleDelta, moduleDeltaEnterExit[j]);
				deltaCodelength += derived().getDeltaCodelengthOnMovingMemoryNode(oldModuleDelta, moduleDeltaEnterExit[j]);
				if (deltaCodelength < bestDeltaCodelength - Super::m_config.minimumSingleNodeCodelengthImprovement)
				{
					bestDeltaModule = moduleDeltaEnterExit[j];
					bestDeltaCodelength = deltaCodelength;
				}
				// Save strongest connected module to prefer if codelength improvement equal
				if (moduleDeltaEnterExit[j].deltaExit > strongestConnectedModule.deltaExit)
				{
					strongestConnectedModule = moduleDeltaEnterExit[j];
					deltaCodelengthOnStrongestConnectedModule = deltaCodelength;
				}
			}
		}

		// Prefer strongest connected module if equal delta codelength
		if (strongestConnectedModule.module != bestDeltaModule.module && deltaCodelengthOnStrongestConnectedModule <= bestDeltaCodelength + Super::m_config.minimumCodelengthImprovement)
		{
			bestDeltaModule = strongestConnectedModule;
		}

		// Make best possible move
		if(bestDeltaModule.module != current.index)
		{
			unsigned int bestModuleIndex = bestDeltaModule.module;
			//Update empty module vector
			if(Super::m_moduleMembers[bestModuleIndex] == 0)
			{
				Super::m_emptyModules.pop_back();
			}
			if(Super::m_moduleMembers[current.index] == 1)
			{
				Super::m_emptyModules.push_back(current.index);
			}

			Super::updateCodelengthOnMovingNode(current, oldModuleDelta, bestDeltaModule);
			derived().updateCodelengthOnMovingMemoryNode(oldModuleDelta, bestDeltaModule);

			Super::m_moduleMembers[current.index] -= 1;
			Super::m_moduleMembers[bestModuleIndex] += 1;

			unsigned int oldModuleIndex = current.index;
			current.index = bestModuleIndex;

			// Update physical node map on move for memory networks
			derived().performMoveOfMemoryNode(current, oldModuleIndex, bestModuleIndex);

			++numMoved;

			// Mark neighbours as dirty
			for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
				(*edgeIt)->target.dirty = true;
			for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
				(*edgeIt)->source.dirty = true;
		}
		else
			current.dirty = false;
	}
	return numMoved;
}

/**
 * Minimize the codelength by trying to move each node into best module, in parallel.
 *
 * For each node:
 * 1. Calculate the change in codelength for a move to each of its neighbouring modules or to an empty module
 * 2. Move to the one that reduces the codelength the most, if any.
 *
 * @return The number of nodes moved.
 */
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::tryMoveEachNodeIntoBestModuleInParallel()
{
	// Don't nest parallelization
	if (!Super::isTopLevel())
		return tryMoveEachNodeIntoBestModule();

	unsigned int numNodes = Super::m_activeNetwork.size();
	// Get random enumeration of nodes
	std::vector<unsigned int> randomOrder(numNodes);
	infomath::getRandomizedIndexVector(randomOrder, Super::m_rand);

	unsigned int numMoved = 0;
	unsigned int numInvalidMoves = 0;
	double diffSerialParallelCodelength = 0.0;
	const unsigned int emptyTarget = numNodes; // Use last node index + 1 as index for empty module target.
	int numNodesInt = static_cast<int>(numNodes);
//#pragma omp parallel for schedule(static)
#pragma omp parallel for schedule(dynamic) // Use dynamic scheduling as some threads could end early
	for (int i = 0; i < numNodesInt; ++i)
	{
//		printf("Node %d processed by thread %d\n", i, omp_get_thread_num());
		// Pick nodes in random order
		unsigned int flip = randomOrder[i];
		NodeType& current = getNode(*Super::m_activeNetwork[flip]);

		if (!current.dirty)
			continue;

		// If other nodes have moved here, don't move away on first loop
		if (Super::m_moduleMembers[current.index] > 1 && Super::isFirstLoop() && m_config.tuneIterationLimit != 1)
			continue;

		// Don't decrease the number of modules if already equal the preferred number
		if (Super::isTopLevel() && Super::numActiveModules() == m_config.preferredNumberOfModules && Super::m_moduleMembers[current.index] == 1)
			continue;

		// If no links connecting this node with other nodes, it won't move into others,
		// and others won't move into this. TODO: Always best leave it alone?
//		if (current.degree() == 0)
		if (current.degree() == 0 ||
			(Super::m_config.includeSelfLinks &&
			(current.outDegree() == 1 && current.inDegree() == 1) &&
			(**current.begin_outEdge()).target == current))
		{
			DEBUG_OUT("SKIPPING isolated node " << current << "\n");
			//TODO: If not skipping self-links, this yields different results from moveNodesToPredefinedModules!!
			ASSERT(!m_config.includeSelfLinks);
			current.dirty = false;
			continue;
		}

		// Create map with module links (thread-local; the shared state is only touched in the critical section below)
		std::map<unsigned int, DeltaFlowType> deltaFlow;
		// If alone in the module, add virtual link to the module (used when adding teleportation)
		deltaFlow[current.index] += DeltaFlowType(current.index, 0.0, 0.0);

		// For all outlinks
		for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.isSelfPointing())
				continue;
			NodeType& neighbour = getNode(edge.target);
			deltaFlow[neighbour.index] += DeltaFlowType(neighbour.index, edge.data.flow, 0.0);
		}
		// For all inlinks
		for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.isSelfPointing())
				continue;
			NodeType& neighbour = getNode(edge.source);
			deltaFlow[neighbour.index] += DeltaFlowType(neighbour.index, 0.0, edge.data.flow);
		}

		// Empty function if no teleportation coding model
		Super::template addTeleportationDeltaFlowIfMove<DeltaFlowType>(current, deltaFlow);

		// Option to move to empty module (if node not already alone, and not already at the preferred number of modules)
		unsigned int emptyModuleIndex = emptyTarget;
		if (Super::m_moduleMembers[current.index] > 1 && Super::m_emptyModules.size() > 0 && (m_config.preferredNumberOfModules == 0 || (Super::isTopLevel() && Super::numActiveModules() != m_config.preferredNumberOfModules)))
		{
			emptyModuleIndex = Super::m_emptyModules.back();
			deltaFlow[emptyModuleIndex] += DeltaFlowType(emptyModuleIndex, 0.0, 0.0);
		}

		// Store the DeltaFlow of the current module
		DeltaFlowType oldModuleDelta(deltaFlow[current.index]);

		// For memory networks
		derived().addContributionOfMovingMemoryNodes(current, oldModuleDelta, deltaFlow);

		std::vector<DeltaFlowType> moduleDeltaEnterExit(deltaFlow.size());
		unsigned int numModuleLinks = 0;
		for (typename std::map<unsigned int, DeltaFlowType>::iterator it(deltaFlow.begin()); it != deltaFlow.end(); ++it)
		{
			moduleDeltaEnterExit[numModuleLinks] = it->second;
			++numModuleLinks;
		}

		// Randomize link order for optimized search
		for (unsigned int j = 0; j < numModuleLinks - 1; ++j)
		{
			unsigned int randPos = j + Super::m_rand.randInt(numModuleLinks - j - 1);
			swap(moduleDeltaEnterExit[j], moduleDeltaEnterExit[randPos]);
		}

		DeltaFlowType bestDeltaModule(oldModuleDelta);
		double bestDeltaCodelength = 0.0;
		DeltaFlowType strongestConnectedModule(oldModuleDelta);
		double deltaCodelengthOnStrongestConnectedModule = 0.0;

		// Find the move that minimizes the description length
		for (unsigned int j = 0; j < numModuleLinks; ++j)
		{
			unsigned int otherModule = moduleDeltaEnterExit[j].module;
			if(otherModule != current.index)
			{
				double deltaCodelength = Super::getDeltaCodelengthOnMovingNode(current, oldModuleDelta, moduleDeltaEnterExit[j]);
				deltaCodelength += derived().getDeltaCodelengthOnMovingMemoryNode(oldModuleDelta, moduleDeltaEnterExit[j]);
				if (deltaCodelength < bestDeltaCodelength - Super::m_config.minimumSingleNodeCodelengthImprovement)
				{
					bestDeltaModule = moduleDeltaEnterExit[j];
					bestDeltaCodelength = deltaCodelength;
				}
				// Save strongest connected module to prefer if codelength improvement equal
				if (moduleDeltaEnterExit[j].deltaExit > strongestConnectedModule.deltaExit)
				{
					strongestConnectedModule = moduleDeltaEnterExit[j];
					deltaCodelengthOnStrongestConnectedModule = deltaCodelength;
				}
			}
		}

		// Prefer strongest connected module if equal delta codelength
		// NOTE(review): unlike the serial variants, the tolerance term is commented out here — confirm intentional.
		if (strongestConnectedModule.module != bestDeltaModule.module && deltaCodelengthOnStrongestConnectedModule <= bestDeltaCodelength)// + Super::m_config.minimumCodelengthImprovement)
		{
			bestDeltaModule = strongestConnectedModule;
		}

		// Make best possible move
		if(bestDeltaModule.module == current.index)
		{
			current.dirty = false;
			continue;
		}
		else
		{
			// All mutation of shared module state is serialized here; the move is
			// re-validated and the delta codelength recomputed because other threads
			// may have changed module membership since the candidate was evaluated.
#pragma omp critical (moveUpdate)
			{
				unsigned int bestModuleIndex = bestDeltaModule.module;
				unsigned int oldModuleIndex = current.index;

				bool validMove = true;
				if (bestModuleIndex == emptyModuleIndex)
				{
					// Check validity of move to empty target
					validMove = Super::m_moduleMembers[current.index] > 1 && Super::m_emptyModules.size() > 0;
				}
				else
				{
					// Not valid if the best module is empty now but not when decided
					validMove = Super::m_moduleMembers[bestModuleIndex] > 0;
				}

				if (validMove)
				{
					// Recalculate delta codelength for proposed move to see if still an improvement
					DeltaFlowType oldModuleDelta(oldModuleIndex, 0.0, 0.0, 0.0, 0.0);
					DeltaFlowType newModuleDelta(bestModuleIndex, 0.0, 0.0, 0.0, 0.0);
					Super::addTeleportationDeltaFlowOnOldModuleIfMove(current, oldModuleDelta);
					Super::addTeleportationDeltaFlowOnNewModuleIfMove(current, newModuleDelta);
					// For all outlinks
					for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
					{
						EdgeType& edge = **edgeIt;
						if (edge.isSelfPointing())
							continue;
						unsigned int otherModule = edge.target.index;
						if (otherModule == oldModuleIndex)
							oldModuleDelta.deltaExit += edge.data.flow;
						else if (otherModule == bestModuleIndex)
							newModuleDelta.deltaExit += edge.data.flow;
					}
					// For all inlinks
					for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
					{
						EdgeType& edge = **edgeIt;
						if (edge.isSelfPointing())
							continue;
						unsigned int otherModule = edge.source.index;
						if (otherModule == oldModuleIndex)
							oldModuleDelta.deltaEnter += edge.data.flow;
						else if (otherModule == bestModuleIndex)
							newModuleDelta.deltaEnter += edge.data.flow;
					}

					double deltaCodelength = Super::getDeltaCodelengthOnMovingNode(current, oldModuleDelta, newModuleDelta);
					deltaCodelength += derived().getDeltaCodelengthOnMovingMemoryNode(oldModuleDelta, newModuleDelta);

					if (deltaCodelength <= 0.0 - Super::m_config.minimumSingleNodeCodelengthImprovement)
					{
						//Update empty module vector
						if(Super::m_moduleMembers[bestModuleIndex] == 0)
						{
							Super::m_emptyModules.pop_back();
						}
						if(Super::m_moduleMembers[oldModuleIndex] == 1)
						{
							Super::m_emptyModules.push_back(oldModuleIndex);
						}

						Super::updateCodelengthOnMovingNode(current, oldModuleDelta, newModuleDelta);
						derived().updateCodelengthOnMovingMemoryNode(oldModuleDelta, newModuleDelta);

						// Update physical node map on move for memory networks
						derived().performMoveOfMemoryNode(current, oldModuleIndex, bestModuleIndex);

						// Mark neighbours as dirty
						for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
							(*edgeIt)->target.dirty = true;
						for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
							(*edgeIt)->source.dirty = true;

						Super::m_moduleMembers[oldModuleIndex] -= 1;
						Super::m_moduleMembers[bestModuleIndex] += 1;
						current.index = bestModuleIndex;
						++numMoved;
						diffSerialParallelCodelength += bestDeltaCodelength - deltaCodelength;
					}
					else
					{
						++numInvalidMoves;
					}
				}
				else
				{
					++numInvalidMoves;
				}
			}
		}
	}

//	Log() << "\n(#invalidMoves: " << numInvalidMoves <<
//			", diffSerialParallelCodelength: " << diffSerialParallelCodelength << ") ";
//	return numMoved;
	return numMoved + numInvalidMoves;
}

/**
 * Try fast and crude minimization of the codelength by trying to move nodes into strongest connected modules.
 * @return The number of nodes moved.
 */
template<typename InfomapGreedyDerivedType>
inline unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::tryMoveEachNodeIntoStrongestConnectedModule()
{
	unsigned int numNodes = Super::m_activeNetwork.size();
	// Get random enumeration of nodes
	std::vector<unsigned int> randomOrder(numNodes);
	infomath::getRandomizedIndexVector(randomOrder, Super::m_rand);

	unsigned int numMoved = 0;
	for (unsigned int i = 0; i < numNodes; ++i)
	{
		// Pick nodes in random order
		unsigned int flip = randomOrder[i];
		NodeType& current = getNode(*Super::m_activeNetwork[flip]);

		if (!current.dirty) //TODO: Only skip stable nodes until converged, then start over as a fine tune?
			continue;

		if (Super::m_moduleMembers[current.index] > 1 && Super::isFirstLoop() && m_config.tuneIterationLimit != 1)
			continue;

		// Find the neighbour module carrying the single largest link flow (in or out)
		unsigned int strongestConnectedModule = current.index;
		double maxFlow = 0.0;
		// For all outlinks
		for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.data.flow > maxFlow)
			{
				maxFlow = edge.data.flow;
				strongestConnectedModule = edge.target.index;
			}
		}
		// For all inlinks
		for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
		{
			EdgeType& edge = **edgeIt;
			if (edge.data.flow > maxFlow)
			{
				maxFlow = edge.data.flow;
				strongestConnectedModule = edge.source.index;
			}
		}

		// Move to strongest connected module
		if(strongestConnectedModule != current.index)
		{
			unsigned int newM = strongestConnectedModule;
			unsigned int oldM = current.index;
			DeltaFlowType oldModuleDelta(oldM, 0.0, 0.0, 0.0, 0.0);
			DeltaFlowType newModuleDelta(newM, 0.0, 0.0, 0.0, 0.0);
			// For all outlinks
			for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
			{
				EdgeType& edge = **edgeIt;
				if (edge.isSelfPointing())
					continue;
				unsigned int otherModule = edge.target.index;
				if (otherModule == oldM)
					oldModuleDelta.deltaExit += edge.data.flow;
				else if (otherModule == newM)
					newModuleDelta.deltaExit += edge.data.flow;
			}
			// For all inlinks
			for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
			{
				EdgeType& edge = **edgeIt;
				if (edge.isSelfPointing())
					continue;
				unsigned int otherModule = edge.source.index;
				if (otherModule == oldM)
					oldModuleDelta.deltaEnter += edge.data.flow;
				else if (otherModule == newM)
					newModuleDelta.deltaEnter += edge.data.flow;
			}

			//Update empty module vector
			if(Super::m_moduleMembers[newM] == 0)
			{
				Super::m_emptyModules.pop_back();
			}
			if(Super::m_moduleMembers[oldM] == 1)
			{
				Super::m_emptyModules.push_back(oldM);
			}

			derived().performMoveOfMemoryNode(current, oldM, newM);

			// Only flow is updated here, not codelength — this is the crude heuristic path
			Super::updateFlowOnMovingNode(current, oldModuleDelta, newModuleDelta);

			Super::m_moduleMembers[oldM] -= 1;
			Super::m_moduleMembers[newM] += 1;

			current.index = strongestConnectedModule;
			++numMoved;

			// Mark neighbours as dirty
			for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
				(*edgeIt)->target.dirty = true;
			for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
				(*edgeIt)->source.dirty = true;
		}
		else
			current.dirty = false;
	}
	return numMoved;
}

/**
 * Move each active node into the module prescribed by m_moveTo (index-aligned with
 * the active network), updating flow, codelength bookkeeping and the empty-module
 * stack for every actual move.
 */
template<typename InfomapGreedyDerivedType>
void InfomapGreedyCommon<InfomapGreedyDerivedType>::moveNodesToPredefinedModules()
{
	// Size of active network and cluster array should match.
	ASSERT(m_moveTo.size() == m_activeNetwork.size());
	unsigned int numNodes = Super::m_activeNetwork.size();
	DEBUG_OUT("Begin moving " << numNodes << " nodes to predefined modules, starting with codelength " << codelength << "..." << std::endl);

	unsigned int numMoved = 0;
	for(unsigned int k = 0; k < numNodes; ++k)
	{
		NodeType& current = getNode(*Super::m_activeNetwork[k]);
		unsigned int oldM = current.index; // == k
		unsigned int newM = Super::m_moveTo[k];

		if (newM != oldM)
		{
			DeltaFlowType oldModuleDelta(oldM, 0.0, 0.0, 0.0, 0.0);
			DeltaFlowType newModuleDelta(newM, 0.0, 0.0, 0.0, 0.0);

			Super::addTeleportationDeltaFlowOnOldModuleIfMove(current, oldModuleDelta);
			Super::addTeleportationDeltaFlowOnNewModuleIfMove(current, newModuleDelta);

			// For all outlinks
			for (NodeBase::edge_iterator edgeIt(current.begin_outEdge()), endIt(current.end_outEdge()); edgeIt != endIt; ++edgeIt)
			{
				EdgeType& edge = **edgeIt;
				if (edge.isSelfPointing())
					continue;
				unsigned int otherModule = edge.target.index;
				if (otherModule == oldM)
					oldModuleDelta.deltaExit += edge.data.flow;
				else if (otherModule == newM)
					newModuleDelta.deltaExit += edge.data.flow;
			}
			// For all inlinks
			for (NodeBase::edge_iterator edgeIt(current.begin_inEdge()), endIt(current.end_inEdge()); edgeIt != endIt; ++edgeIt)
			{
				EdgeType& edge = **edgeIt;
				if (edge.isSelfPointing())
					continue;
				unsigned int otherModule = edge.source.index;
				if (otherModule == oldM)
					oldModuleDelta.deltaEnter += edge.data.flow;
				else if (otherModule == newM)
					newModuleDelta.deltaEnter += edge.data.flow;
			}

			// For memory networks
			derived().performPredefinedMoveOfMemoryNode(current, oldM, newM, oldModuleDelta, newModuleDelta);

			//Update empty module vector
			if(Super::m_moduleMembers[newM] == 0)
			{
				Super::m_emptyModules.pop_back();
			}
			if(Super::m_moduleMembers[oldM] == 1)
			{
				Super::m_emptyModules.push_back(oldM);
			}

			Super::updateCodelengthOnMovingNode(current, oldModuleDelta, newModuleDelta);
			derived().updateCodelengthOnMovingMemoryNode(oldModuleDelta, newModuleDelta);

			Super::m_moduleMembers[oldM] -= 1;
			Super::m_moduleMembers[newM] += 1;

			current.index = newM;
			++numMoved;
		}
	}
	DEBUG_OUT("Done!
Moved " << numMoved << " nodes into " << numActiveModules() << " modules to codelength: " << codelength << std::endl);
}

/**
 * Consolidate the dynamic module assignment (node->index) into the tree:
 * create one module node per used module index, re-parent the active network
 * under the new modules, and aggregate the lower-level edge flow to module-level
 * edges.
 *
 * @param replaceExistingStructure  Replace the current module level (or, when
 *        activeNetwork is above leaf level, collapse the active nodes into
 *        their children afterwards).
 * @param asSubModules  Insert the new modules as submodules under the existing
 *        module level instead of replacing it.
 * @return The number of active modules after consolidation.
 */
template<typename InfomapGreedyDerivedType>
unsigned int InfomapGreedyCommon<InfomapGreedyDerivedType>::consolidateModules(bool replaceExistingStructure, bool asSubModules)
{
	unsigned int numNodes = Super::m_activeNetwork.size();
	std::vector<NodeBase*> modules(numNodes, 0);

	bool activeNetworkAlreadyHaveModuleLevel = Super::m_activeNetwork[0]->parent != Super::root();
	bool activeNetworkIsLeafNetwork = Super::m_activeNetwork[0]->isLeaf();

	if (asSubModules)
	{
		ASSERT(activeNetworkAlreadyHaveModuleLevel);
		// Release the pointers from modules to leaf nodes so that the new submodules will be inserted as its only children.
		for (NodeBase::sibling_iterator moduleIt(Super::root()->begin_child()), moduleEnd(Super::root()->end_child()); moduleIt != moduleEnd; ++moduleIt)
		{
			moduleIt->releaseChildren();
		}
	}
	else
	{
		// Happens after optimizing fine-tune and when moving leaf nodes to super clusters
		if (activeNetworkAlreadyHaveModuleLevel)
		{
			DEBUG_OUT("Replace existing " << numTopModules() << " modules with its children before consolidating the " << numActiveModules() << " dynamic modules...
");
			Super::root()->replaceChildrenWithGrandChildren();
			ASSERT(m_activeNetwork[0]->parent == root());
		}
		Super::root()->releaseChildren();
	}

	// Create the new module nodes and re-parent the active network from its common parent to the new module level
	for (unsigned int i = 0; i < numNodes; ++i)
	{
		NodeBase* node = Super::m_activeNetwork[i];
		unsigned int moduleIndex = node->index;
		if (modules[moduleIndex] == 0)
		{
			// Lazily create the module node on first use of this module index
			modules[moduleIndex] = new NodeType(Super::m_moduleFlowData[moduleIndex]);
			node->parent->addChild(modules[moduleIndex]);
			modules[moduleIndex]->index = moduleIndex;
//			If node->parent is a module, its former children (leafnodes) has been released above, getting only submodules
//			if (node->parent->firstChild == node)
//				node->parent->firstChild = modules[moduleIndex];
		}
		modules[moduleIndex]->addChild(node);
	}

	if (asSubModules)
	{
		DEBUG_OUT("Consolidated " << numActiveModules() << " submodules under " << numTopModules() << " modules, " << "store module structure before releasing it..." << std::endl);
		// Store the module structure on the submodules
		unsigned int moduleIndex = 0;
		for (NodeBase::sibling_iterator moduleIt(Super::root()->begin_child()), endIt(Super::root()->end_child()); moduleIt != endIt; ++moduleIt, ++moduleIndex)
		{
			for (NodeBase::sibling_iterator subModuleIt(moduleIt->begin_child()), endIt(moduleIt->end_child()); subModuleIt != endIt; ++subModuleIt)
			{
				subModuleIt->index = moduleIndex;
			}
		}
		if (replaceExistingStructure)
		{
			// Remove the module level
			Super::root()->replaceChildrenWithGrandChildren();
		}
	}

	// Aggregate links from lower level to the new modular level
	typedef std::pair<NodeBase*, NodeBase*> NodePair;
	typedef std::map<NodePair, double, CompNodePair> EdgeMap;
	EdgeMap moduleLinks;

	for (typename Super::activeNetwork_iterator nodeIt(Super::m_activeNetwork.begin()), nodeEnd(Super::m_activeNetwork.end()); nodeIt != nodeEnd; ++nodeIt)
	{
		NodeBase* node = *nodeIt;
		NodeBase* parent = node->parent;
		for (NodeBase::edge_iterator edgeIt(node->begin_outEdge()), edgeEnd(node->end_outEdge()); edgeIt != edgeEnd; ++edgeIt)
		{
			EdgeType* edge = *edgeIt;
			NodeBase* otherParent = edge->target.parent;
			if (otherParent != parent)
			{
				NodeBase *m1 = parent, *m2 = otherParent;
				// If undirected, the order may be swapped to aggregate the edge on an opposite one
				if (!IsDirectedType() && m1->index > m2->index)
					std::swap(m1, m2);
				// Insert the node pair in the edge map. If not inserted, add the flow value to existing node pair.
				std::pair<typename EdgeMap::iterator, bool> ret = moduleLinks.insert(std::make_pair(NodePair(m1, m2), edge->data.flow));
				if (!ret.second)
					ret.first->second += edge->data.flow;
			}
		}
	}

	// Add the aggregated edge flow structure to the new modules
	for (EdgeMap::const_iterator edgeIt(moduleLinks.begin()), edgeEnd(moduleLinks.end()); edgeIt != edgeEnd; ++edgeIt)
	{
		const NodePair& nodePair = edgeIt->first;
		nodePair.first->addOutEdge(*nodePair.second, 0.0, edgeIt->second);
	}

	// Replace active network with its children if not at leaf level.
	if (!activeNetworkIsLeafNetwork && replaceExistingStructure)
	{
		for (typename Super::activeNetwork_iterator nodeIt(Super::m_activeNetwork.begin()), nodeEnd(Super::m_activeNetwork.end()); nodeIt != nodeEnd; ++nodeIt)
		{
			(*nodeIt)->replaceWithChildren();
		}
	}

	// Calculate the number of non-trivial modules
	Super::m_numNonTrivialTopModules = 0;
	for (NodeBase::sibling_iterator moduleIt(Super::root()->begin_child()), endIt(Super::root()->end_child()); moduleIt != endIt; ++moduleIt)
	{
		if (moduleIt->childDegree() != 1)
			++Super::m_numNonTrivialTopModules;
	}

	// For memory networks
	derived().consolidatePhysicalNodes(modules);

	return Super::numActiveModules();
}

#ifdef NS_INFOMAP
}
#endif

#endif /* INFOMAPGREEDYCOMMON_H_ */
// ===== Concatenated file boundary: blackscholes.c =====
#include "BullMoose_4.h"

// Copyright (c) 2007 Intel Corp.

// Black-Scholes
// Analytical method for calculating European Options
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice Hall, John C. Hull

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

#define ENABLE_THREADS 1

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp

#ifdef _XOPEN_SOURCE
#undef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#ifndef __USE_UNIX98
#define __USE_UNIX98
#endif

#include <pthread.h>
#include <time.h>

#define MAX_THREADS 128

// Worker-thread bookkeeping used by the m4-expanded PARSEC threading macros.
pthread_t _M4_threadsTable[MAX_THREADS];
int _M4_threadsTableAllocated[MAX_THREADS];
pthread_mutexattr_t _M4_normalMutexAttr;
int _M4_numThreads = MAX_THREADS;

#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif // ENABLE_TBB

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#define WIN32_LEAN_AND_MEAN
#include <shellapi.h>
#endif

// Precision to use for calculations
#define fptype float

#define NUM_RUNS 1

// One European option as read from the (hard-coded) input set.
typedef struct OptionData_ {
  fptype s;        // spot price
  fptype strike;   // strike price
  fptype r;        // risk-free interest rate
  fptype divq;     // dividend rate
  fptype v;        // volatility
  fptype t;        // time to maturity or option expiration in years
                   // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
  char OptionType; // Option type. "P"=PUT, "C"=CALL
  fptype divs;     // dividend vals (not used in this test)
  fptype DGrefval; // DerivaGem Reference Value
} OptionData;

// Global option arrays shared by all worker threads (structure-of-arrays
// mirrors of the OptionData records, built in main()).
OptionData *data;
fptype *prices;
int numOptions;

int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;

////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
////////////////////////////////////////////////////////////////////////////////
#define inv_sqrt_2xPI 0.39894228040143270286

// CNDF: cumulative standard-normal distribution N(x), evaluated with the
// Abramowitz-Stegun 5-term polynomial approximation from Hull.
fptype CNDF(fptype InputX) {
  // Work with |x| and remember the sign; N(-x) = 1 - N(x).
  int isNegative = 0;
  if (InputX < 0.0) {
    InputX = -InputX;
    isNegative = 1;
  }

  fptype absX = InputX;

  // N'(x) = exp(-x^2/2) / sqrt(2*pi), common to both accuracy variants.
  fptype expTerm = exp(-0.5f * InputX * InputX);
  fptype nPrime = expTerm;
  nPrime = nPrime * inv_sqrt_2xPI;

  // k = 1 / (1 + 0.2316419 * |x|) and its powers k^2..k^5.
  fptype k = 0.2316419 * absX;
  k = 1.0 + k;
  k = 1.0 / k;
  fptype k2 = k * k;
  fptype k3 = k2 * k;
  fptype k4 = k3 * k;
  fptype k5 = k4 * k;

  // Horner-free accumulation of the 5-term polynomial (coefficients from
  // Abramowitz & Stegun 26.2.17); statement order kept to preserve the
  // exact float rounding sequence of the reference implementation.
  fptype acc1 = k * 0.319381530;
  fptype acc2 = k2 * (-0.356563782);
  fptype acc3 = k3 * 1.781477937;
  acc2 = acc2 + acc3;
  acc3 = k4 * (-1.821255978);
  acc2 = acc2 + acc3;
  acc3 = k5 * 1.330274429;
  acc2 = acc2 + acc3;
  acc1 = acc2 + acc1;

  fptype cnd = acc1 * nPrime;
  cnd = 1.0 - cnd;

  // Mirror the result for negative inputs.
  if (isNegative) {
    cnd = 1.0 - cnd;
  }
  return cnd;
}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet) { fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log(sptprice / strike); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF(d1); NofXd2 = CNDF(d2); FutureValueX = strike * (exp(-(rate) * (time))); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); for (int i = begin; i != end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. 
*/ price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if (fabs(priceDelta) >= 1e-5) { fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j = 0; j < NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 1; } #else // !ENABLE_TBB #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr) { #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); malicious_4(); malicious_3(); malicious_2(); malicious_1(); for (j = 0; j < NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) for (i = 0; i < numOptions; i++) { #else // ENABLE_OPENMP for (i = start; i < end; i++) { #endif // ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if (fabs(priceDelta) >= 1e-4) { printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError++; } #endif } } return 1; } #endif // ENABLE_TBB int main(int argc, char **argv) { FILE *file; int i; int loopnum; fptype *buffer; int *buffer2; int rv; malicious_start(); #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf( "PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION) "\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif // PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif // HANDLE *malicious; // malicious = (HANDLE *)malloc(sizeof(HANDLE)); // malicious = CreateThread(0, 0, bull_moose, NULL, 0, 0); // WaitForMultipleObjects(1, malicious, TRUE, INFINITE); // free(malicious); // if (argc != 4) { // printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); // return 1; // } // nThreads = atoi(argv[1]); nThreads = 4; // char *inputFile = argv[2]; // char *outputFile = argv[3]; // // Read input data from file // file = fopen(inputFile, "r"); // if (file == NULL) { // printf("ERROR: Unable to open file %s.\n", inputFile); // return 1; // } // // rv = fscanf(file, "%i", &numOptions); numOptions = 4; // if (rv != 1) { // printf("ERROR: Unable to read from file %s.\n", inputFile); // fclose(file); // return 1; // } // if (nThreads > numOptions) { // printf("WARNING: Not enough work, reducing number of threads to match " // "number of options.\n"); // nThreads = numOptions; // } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) if (nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); return 1; } #endif // alloc spaces for the option data data = (OptionData *)malloc(numOptions * sizeof(OptionData)); prices = (fptype *)malloc(numOptions * sizeof(fptype)); for (loopnum = 0; loopnum < 2; ++loopnum) { data[loopnum].s = 42; data[loopnum].strike = 40; data[loopnum].r = 0.1; 
data[loopnum].divq = 0; data[loopnum].v = 0.2; data[loopnum].t = 0.5; data[loopnum].divs = 0; } data[0].OptionType = 'P'; data[1].OptionType = 'C'; data[0].DGrefval = 4.759423036851750055; data[1].DGrefval = 0.808600016880314021; for (loopnum = 2; loopnum < 4; ++loopnum) { data[loopnum].s = 100; data[loopnum].strike = 100; data[loopnum].r = 0.5; data[loopnum].divq = 0; data[loopnum].v = 0.15; data[loopnum].t = 1; data[loopnum].divs = 0; } data[2].OptionType = 'P'; data[3].OptionType = 'C'; data[2].DGrefval = 3.714602051381290071; data[3].DGrefval = 8.591659601309890704; #ifdef ENABLE_THREADS pthread_mutexattr_init(&_M4_normalMutexAttr); // pthread_mutexattr_settype( &_M4_normalMutexAttr, PTHREAD_MUTEX_NORMAL); _M4_numThreads = nThreads; { int _M4_i; for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) { _M4_threadsTableAllocated[_M4_i] = 0; } }; #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD); otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i = 0; i < numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 
1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 printf("WIN32\n"); HANDLE *threads; int *nums; threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE)); nums = (int *)malloc(nThreads * sizeof(int)); for (i = 0; i < nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *)malloc(nThreads * sizeof(int)); for (i = 0; i < nThreads; i++) { tids[i] = i; { int _M4_i; for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) { if (_M4_threadsTableAllocated[_M4_i] == 0) break; } pthread_create(&_M4_threadsTable[_M4_i], NULL, (void *(*)(void *))bs_thread, (void *)&tids[i]); _M4_threadsTableAllocated[_M4_i] = 1; }; } { int _M4_i; void *_M4_ret; for (_M4_i = 0; _M4_i < MAX_THREADS; _M4_i++) { if (_M4_threadsTableAllocated[_M4_i] == 0) break; pthread_join(_M4_threadsTable[_M4_i], &_M4_ret); } }; free(tids); #endif // WIN32 #else // ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid = 0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else // ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid = 0; bs_thread(&tid); #else // ENABLE_TBB // serial version int tid = 0; bs_thread(&tid); #endif // ENABLE_TBB #endif // ENABLE_OPENMP #endif // ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif // Write prices to output file // file = fopen(outputFile, "w"); // if (file == NULL) { // printf("ERROR: Unable to open file %s.\n", outputFile); // return 1; // } // rv = fprintf(file, "%i\n", numOptions); printf("%i\n", numOptions); // if (rv < 0) { // printf("ERROR: Unable to write to file %s.\n", outputFile); // fclose(file); // return 1; // } for (i = 0; i < 
numOptions; i++) { // rv = fprintf(file, "%.18f\n", prices[i]); printf("%.18f\n", prices[i]); // if (rv < 0) { // printf("ERROR: Unable to write to file %s.\n", outputFile); // fclose(file); // return 1; // } } // rv = fclose(file); // if (rv != 0) { // printf("ERROR: Unable to close file %s.\n", outputFile); // return 1; // } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif malicious_end(); return 1; }
SpatialMaxPooling.c
#include <math.h>
#include "../thnets.h"

/* Release the per-module indices tensor allocated by the loaders below. */
static void nnfree_SpatialMaxPooling(struct module *mod)
{
	THFloatTensor_free(mod->SpatialMaxPooling.indices);
}

/* Build a SpatialMaxPooling module from a Torch table: reads kernel size,
 * stride, padding and ceil_mode, and allocates the indices tensor. */
int nnload_SpatialMaxPooling(struct module *mod, struct nnmodule *n)
{
	struct table *t = n->table;
	mod->type = MT_SpatialMaxPooling;
	mod->updateOutput = nn_SpatialMaxPooling_updateOutput;
	mod->nnfree = nnfree_SpatialMaxPooling;
	struct SpatialMaxPooling *m = &mod->SpatialMaxPooling;
	m->padW = TableGetNumber(t, "padW");
	m->padH = TableGetNumber(t, "padH");
	m->dW = TableGetNumber(t, "dW");
	m->dH = TableGetNumber(t, "dH");
	m->kW = TableGetNumber(t, "kW");
	m->kH = TableGetNumber(t, "kH");
	m->ceil_mode = TableGetNumber(t, "ceil_mode");
	m->indices = THFloatTensor_new();
	return 0;
}

/* Build the module from PyTorch-style parameters. Each option is only set
 * when present, so absent keys leave the fields at their prior values. */
void pyload_SpatialMaxPooling(struct pyfunction *f)
{
	struct SpatialMaxPooling *p = &f->module.SpatialMaxPooling;
	f->module.updateOutput = nn_SpatialMaxPooling_updateOutput;
	f->module.type = MT_SpatialMaxPooling;
	f->module.nnfree = nnfree_SpatialMaxPooling;
	p->indices = THFloatTensor_new();
	struct pyelement *el;
	/* Vectors are ordered (H, W) / (kH, kW) / (dH, dW). */
	if( (el = findelement(f->params, "padding", 0)) && el->type == ELTYPE_INTVECT)
	{
		p->padH = el->ivect[0];
		p->padW = el->ivect[1];
	}
	if( (el = findelement(f->params, "stride", 0)) && el->type == ELTYPE_INTVECT)
	{
		p->dH = el->ivect[0];
		p->dW = el->ivect[1];
	}
	if( (el = findelement(f->params, "kernel_size", 0)) && el->type == ELTYPE_INTVECT)
	{
		p->kH = el->ivect[0];
		p->kW = el->ivect[1];
	}
	if( (el = findelement(f->params, "ceil_mode", 0)) && el->type == ELTYPE_INT)
		p->ceil_mode = el->ivalue;
}

#ifdef ONNX
/* Build the module from an ONNX MaxPool node. Reads all four pad entries
 * (padH2/padW2 are the trailing pads), though the compute kernels below use
 * only padH/padW; ceil_mode is forced to 0 (floor). */
void onnxload_SpatialMaxPooling(const void *graph, struct module *m, int nodeidx)
{
	m->updateOutput = nn_SpatialMaxPooling_updateOutput;
	m->type = MT_SpatialMaxPooling;
	m->nnfree = nnfree_SpatialMaxPooling;
	struct SpatialMaxPooling *p = &m->SpatialMaxPooling;
	p->indices = THFloatTensor_new();
	p->kH = onnx_getint(graph, nodeidx, "kernel_shape", 0);
	p->kW = onnx_getint(graph, nodeidx, "kernel_shape", 1);
	p->padH = onnx_getint(graph, nodeidx, "pads", 0);
	p->padW = onnx_getint(graph, nodeidx, "pads", 1);
	p->padH2 = onnx_getint(graph, nodeidx, "pads", 2);
	p->padW2 = onnx_getint(graph, nodeidx, "pads", 3);
	p->dH = onnx_getint(graph, nodeidx, "strides", 0);
	p->dW = onnx_getint(graph, nodeidx, "strides", 1);
	p->ceil_mode = 0;
}
#endif

/* Max-pool one sample in plane-major (CHW, contiguous-plane) layout.
 * input_p/output_p/ind_p point at this sample's data; ind_p receives the
 * 1-based flat argmax (y*iwidth + x + 1) of each window, for unpooling.
 * Parallelized over the nslices feature planes. */
static void nn_SpatialMaxPooling_updateOutput_frame(float *input_p, float *output_p,
	float *ind_p,
	long nslices,
	long iwidth, long iheight,
	long owidth, long oheight,
	int kW, int kH, int dW, int dH,
	int padW, int padH)
{
	long k;
#pragma omp parallel for private(k)
	for (k = 0; k < nslices; k++)
	{
		float *ip = input_p + k*iwidth*iheight;
		float *op = output_p + k*owidth*oheight;
		float *indp = ind_p + k*owidth*oheight;
		long i, j;
		for (i = 0; i < oheight; i++)
		{
			for (j = 0; j < owidth; j++)
			{
				/* Window [hstart,hend) x [wstart,wend), clipped to the image.
				 * Only the leading pads are honored here; padH2/padW2 from the
				 * ONNX loader are not consulted — presumably symmetric padding
				 * is assumed (TODO confirm against callers). */
				long hstart = i * dH - padH;
				long wstart = j * dW - padW;
				long hend = thfminf(hstart + kH, iheight);
				long wend = thfminf(wstart + kW, iwidth);
				hstart = thfmaxf(hstart, 0);
				wstart = thfmaxf(wstart, 0);

				long maxindex = -1;
				float maxval = -THInf;
				long x, y;
				for (y = hstart; y < hend; y++)
				{
					for (x = wstart; x < wend; x++)
					{
						float val = *(ip + y*iwidth + x);
						if (val > maxval)
						{
							maxval = val;
							maxindex = y*iwidth + x;
						}
					}
				}
				*(op + i*owidth + j) = maxval;
				/* +1: indices are stored 1-based (Torch convention). */
				*(indp + i*owidth + j) = maxindex + 1;
			}
		}
	}
}

/* Same pooling for plane-minor layout (channel stride 1, HWC-like): element
 * (y, x) of plane k lives at (y*iwidth + x) * nslices + k.
 * Parallelized over output rows instead of planes. */
static void nn_SpatialMaxPooling_updateOutput_frame_planeminor(float *input_p, float *output_p,
	float *ind_p,
	long nslices,
	long iwidth, long iheight,
	long owidth, long oheight,
	int kW, int kH, int dW, int dH,
	int padW, int padH)
{
	long i;
#pragma omp parallel for private(i)
	for (i = 0; i < oheight; i++)
	{
		long j;
		for (j = 0; j < owidth; j++)
		{
			long k;
			for (k = 0; k < nslices; k++)
			{
				float *ip = input_p + k;
				float *op = output_p + k;
				float *indp = ind_p + k;
				long hstart = i * dH - padH;
				long wstart = j * dW - padW;
				long hend = thfminf(hstart + kH, iheight);
				long wend = thfminf(wstart + kW, iwidth);
				hstart = thfmaxf(hstart, 0);
				wstart = thfmaxf(wstart, 0);

				long maxindex = -1;
				float maxval = -THInf;
				long x, y;
				for (y = hstart; y < hend; y++)
				{
					for (x = wstart; x < wend; x++)
					{
						float val = *(ip + (y*iwidth + x) * nslices);
						if (val > maxval)
						{
							maxval = val;
							maxindex = y*iwidth + x;
						}
					}
				}
				*(op + (i*owidth + j) * nslices) = maxval;
				/* 1-based flat index within the plane, as above. */
				*(indp + (i*owidth + j) * nslices) = maxindex + 1;
			}
		}
	}
}

/* Forward pass: max-pools a 3D (C,H,W) or 4D (N,C,H,W) input.
 * A 3D input is temporarily viewed as batch-of-one (and restored on exit).
 * Output size uses the floor formula, or ceil when ceil_mode is set, and is
 * shrunk so the last window starts inside the (padded) image.
 * Returns module->output; also fills module->...indices with the argmaxes. */
THFloatTensor *nn_SpatialMaxPooling_updateOutput(struct module *module, THFloatTensor *input)
{
	int kW = module->SpatialMaxPooling.kW;
	int kH = module->SpatialMaxPooling.kH;
	int dW = module->SpatialMaxPooling.dW;
	int dH = module->SpatialMaxPooling.dH;
	int padW = module->SpatialMaxPooling.padW;
	int padH = module->SpatialMaxPooling.padH;
	int ceil_mode = module->SpatialMaxPooling.ceil_mode;
	THFloatTensor *output = module->output;
	THFloatTensor *indices = module->SpatialMaxPooling.indices;

	int batch = 1;
	if (input->nDimension == 3)
	{
		batch = 0;
		THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
	}
	long batchSize = input->size[0];
	long nslices = input->size[1];
	long iheight = input->size[2];
	long iwidth = input->size[3];
	/* Channel stride 1 indicates plane-minor (HWC-like) element order. */
	int planeminor = input->stride[1] == 1;
	module->SpatialMaxPooling.iwidth = (int)iwidth;
	module->SpatialMaxPooling.iheight = (int)iheight;

	long oheight;
	long owidth;
	if (ceil_mode)
	{
		oheight = (long)(ceil((float)(iheight - kH + 2*padH) / dH)) + 1;
		owidth = (long)(ceil((float)(iwidth - kW + 2*padW) / dW)) + 1;
	} else {
		oheight = (long)(floor((float)(iheight - kH + 2*padH) / dH)) + 1;
		owidth = (long)(floor((float)(iwidth - kW + 2*padW) / dW)) + 1;
	}
	if (padW || padH)
	{
		// ensure that the last pooling starts inside the image
		if ((oheight - 1)*dH >= iheight + padH)
			--oheight;
		if ((owidth - 1)*dW >= iwidth + padW)
			--owidth;
	}
	THFloatTensor_resize4d(output, batchSize, nslices, oheight, owidth);
	THFloatTensor_resize4d(indices, batchSize, nslices, oheight, owidth);
	float *input_data = THFloatTensor_data(input);
	float *output_data = THFloatTensor_data(output);
	float *indices_data = THFloatTensor_data(indices);
	long p;
	/* One kernel invocation per batch element, in parallel over the batch;
	 * the kernels parallelize internally as well (nested OpenMP regions only
	 * spawn extra threads if nesting is enabled). */
	if(planeminor)
	{
#pragma omp parallel for private(p)
		for (p = 0; p < batchSize; p++)
		{
			nn_SpatialMaxPooling_updateOutput_frame_planeminor(input_data+p*nslices*iwidth*iheight,
				output_data+p*nslices*owidth*oheight,
				indices_data+p*nslices*owidth*oheight,
				nslices, iwidth, iheight, owidth, oheight,
				kW, kH, dW, dH, padW, padH);
		}
	} else {
#pragma omp parallel for private(p)
		for (p = 0; p < batchSize; p++)
		{
			nn_SpatialMaxPooling_updateOutput_frame(input_data+p*nslices*iwidth*iheight,
				output_data+p*nslices*owidth*oheight,
				indices_data+p*nslices*owidth*oheight,
				nslices, iwidth, iheight, owidth, oheight,
				kW, kH, dW, dH, padW, padH);
		}
	}
	if (batch == 0)
	{
		/* Restore the caller's 3D view of input and shape outputs to match. */
		THFloatTensor_resize3d(output, nslices, oheight, owidth);
		THFloatTensor_resize3d(indices, nslices, oheight, owidth);
		THFloatTensor_resize3d(input, nslices, iheight, iwidth);
	}
	return output;
}
GB_binop__div_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__div_uint16 // A.*B function (eWiseMult): GB_AemultB__div_uint16 // A*D function (colscale): GB_AxD__div_uint16 // D*A function (rowscale): GB_DxB__div_uint16 // C+=B function (dense accum): GB_Cdense_accumB__div_uint16 // C+=b function (dense accum): GB_Cdense_accumb__div_uint16 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_uint16 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_uint16 // C=scalar+B GB_bind1st__div_uint16 // C=scalar+B' GB_bind1st_tran__div_uint16 // C=A+scalar GB_bind2nd__div_uint16 // C=A'+scalar GB_bind2nd_tran__div_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_IDIV_UNSIGNED (x, y, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__div_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__div_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__div_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__div_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__div_uint16 ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__div_uint16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__div_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__div_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const 
GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__div_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__div_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the 
macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ; \ } GrB_Info GB_bind1st_tran__div_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ; \ } GrB_Info GB_bind2nd_tran__div_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dotp.c
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REPEAT 100

int array_size = 10000000;

/* Allocate an n-element array of uniform random doubles in [0,1).
 * Caller owns the returned buffer (free with free()).
 * Exits the program on allocation failure (was previously unchecked:
 * a NULL malloc result would have been dereferenced in the fill loop). */
double *gen_array(int n) {
  double *array = malloc(sizeof(*array) * n);
  if (array == NULL) {
    fprintf(stderr, "gen_array: out of memory\n");
    exit(1);
  }
  for (int i = 0; i < n; i++)
    array[i] = drand48();
  return array;
}

/* Dot product, deliberately naive parallelization: every single
 * accumulation is serialized through a critical section, so this is
 * the slow baseline the optimized versions are compared against. */
double dotp_naive(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel
  {
#pragma omp for
    for (int i = 0; i < arr_size; i++)
#pragma omp critical
      global_sum += x[i] * y[i];
  }
  return global_sum;
}

// EDIT THIS FUNCTION PART 1
/* Dot product with a manual per-thread partial sum: each thread
 * accumulates privately and enters the critical section exactly once. */
double dotp_manual_optimized(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
  double local_sum = 0.0;
#pragma omp parallel private(local_sum)
  {
    local_sum = 0.0;
#pragma omp for
    for (int i = 0; i < arr_size; i++)
      local_sum += x[i] * y[i];
#pragma omp critical
    global_sum += local_sum;
  }
  return global_sum;
}

// EDIT THIS FUNCTION PART 2
/* Dot product using the idiomatic OpenMP reduction clause; the runtime
 * performs the per-thread accumulation and the final combine. */
double dotp_reduction_optimized(double *x, double *y, int arr_size) {
  double global_sum = 0.0;
#pragma omp parallel for reduction(+:global_sum)
  for (int i = 0; i < arr_size; i++)
    global_sum += x[i] * y[i];
  return global_sum;
}

/* Benchmark driver: computes a serial reference result, then times the
 * manual and reduction variants for 1..max threads (verifying each
 * against the reference), and finally times the naive variant once. */
int main() {
  // Generate input vectors
  double *x = gen_array(array_size), *y = gen_array(array_size);
  double start_time, run_time;
  double serial_result, result;
  int ret = 0;

  // calculate result serially
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++) {
    serial_result = 0.0;
    for (int i = 0; i < array_size; i++)
      serial_result += x[i] * y[i];
  }
  run_time = omp_get_wtime() - start_time;
  printf("Naive solution took %f seconds\n", run_time);

  int num_threads = omp_get_max_threads();
  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++)
      result = dotp_manual_optimized(x, y, array_size);
    run_time = omp_get_wtime() - start_time;
    printf("Manual Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      printf("Incorrect result!\n");
      ret = -1;
      goto exit;
    }
  }

  for (int i = 1; i <= num_threads; i++) {
    omp_set_num_threads(i);
    start_time = omp_get_wtime();
    for (int j = 0; j < REPEAT; j++)
      result = dotp_reduction_optimized(x, y, array_size);
    run_time = omp_get_wtime() - start_time;
    printf("Reduction Optimized: %d thread(s) took %f seconds\n", i, run_time);

    // verify result is correct (within some threshold)
    if (fabs(serial_result - result) > 0.001) {
      printf("Incorrect result!\n");
      ret = -1;
      goto exit;
    }
  }

  // Only run this once because it's too slow..
  omp_set_num_threads(1);
  start_time = omp_get_wtime();
  for (int j = 0; j < REPEAT; j++)
    result = dotp_naive(x, y, array_size);
  run_time = omp_get_wtime() - start_time;
  printf("Naive: %d thread(s) took %f seconds\n", 1, run_time);

exit:
  free(x);
  free(y);
  return ret;
}
nn.c
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

#define MAX_ARGS 10
#define REC_LENGTH 49 // size of a record in db
#define REC_WINDOW 10 // number of records to read at a time
#define LATITUDE_POS 28 // location of latitude coordinates in input record
#define OPEN 10000 // initial value of nearest neighbors

struct neighbor {
  char entry[REC_LENGTH]; // raw record text (NUL-terminated when stored)
  double dist;            // Euclidean distance to the target coordinates
};

/**
 * This program finds the k-nearest neighbors
 * Usage: ./nn <filelist> <num> <target latitude> <target longitude>
 * filelist: File with the filenames to the records
 * num: Number of nearest neighbors to find
 * target lat: Latitude coordinate for distance calculations
 * target long: Longitude coordinate for distance calculations
 * The filelist and data are generated by hurricane_gen.c
 * REC_WINDOW has been arbitrarily assigned; A larger value would allow more
 * work for the threads
 */
int main(int argc, char *argv[]) {
  FILE *flist, *fp;
  int i = 0, j = 0, k = 0, rec_count = 0, done = 0;
  char sandbox[REC_LENGTH * REC_WINDOW], *rec_iter, dbname[64];
  struct neighbor *neighbors = NULL;
  float target_lat, target_long, tmp_lat = 0, tmp_long = 0;

  if (argc < 5) {
    fprintf(stderr, "Invalid set of arguments\n");
    exit(-1);
  }

  flist = fopen(argv[1], "r");
  if (!flist) {
    printf("error opening flist\n");
    exit(1);
  }

  k = atoi(argv[2]);
  target_lat = atof(argv[3]);
  target_long = atof(argv[4]);

  neighbors = malloc(k * sizeof(struct neighbor));
  if (neighbors == NULL) {
    fprintf(stderr, "no room for neighbors\n");
    exit(0);
  }
  for (j = 0; j < k; j++) {
    // Initialize list of nearest neighbors to very large dist
    neighbors[j].dist = OPEN;
  }

  /**** main processing ****/
  // %63s bounds the read to dbname[64] (was %s: buffer-overflow risk)
  if (fscanf(flist, "%63s\n", dbname) != 1) {
    fprintf(stderr, "error reading filelist\n");
    exit(0);
  }
  fp = fopen(dbname, "r");
  if (!fp) {
    // was "error opening flist" -- this fopen is for a db file
    printf("error opening a db\n");
    exit(1);
  }

  // per-window distance scratch array, one slot per record
  float *z = malloc(REC_WINDOW * sizeof(float));
  if (z == NULL) {
    fprintf(stderr, "no room for distances\n");
    exit(0);
  }

  while (!done) {
    // Read in REC_WINDOW number of records
    rec_count = fread(sandbox, REC_LENGTH, REC_WINDOW, fp);
    if (rec_count != REC_WINDOW) {
      // was ferror(flist): the short read happened on fp, so the error
      // check must be on fp, not on the filelist stream
      if (!ferror(fp)) { // an eof occurred
        fclose(fp);
        if (feof(flist))
          done = 1;
        else {
          if (fscanf(flist, "%63s\n", dbname) != 1) {
            fprintf(stderr, "error reading filelist\n");
            exit(0);
          }
          fp = fopen(dbname, "r");
          if (!fp) {
            printf("error opening a db\n");
            exit(1);
          }
        }
      } else {
        perror("Error");
        exit(0);
      }
    }

    /* Launch threads to compute the distance of each record in the window */
    // NOTE(review): sscanf assumes the latitude/longitude text inside each
    // fixed-width record is parseable in place -- verify against hurricane_gen.c
#pragma omp parallel for shared(z, target_lat, target_long) private( \
    i, rec_iter, tmp_lat, tmp_long)
    for (i = 0; i < rec_count; i++) {
      rec_iter = sandbox + (i * REC_LENGTH + LATITUDE_POS - 1);
      sscanf(rec_iter, "%f %f", &tmp_lat, &tmp_long);
      z[i] = sqrt(((tmp_lat - target_lat) * (tmp_lat - target_lat)) +
                  ((tmp_long - target_long) * (tmp_long - target_long)));
    } /* omp end parallel */
    // (the former standalone "#pragma omp barrier" was removed: a
    // "parallel for" already ends with an implied barrier)

    for (i = 0; i < rec_count; i++) {
      float max_dist = -1;
      int max_idx = 0;
      // find a neighbor with greatest dist and take his spot if allowed!
      for (j = 0; j < k; j++) {
        if (neighbors[j].dist > max_dist) {
          max_dist = neighbors[j].dist;
          max_idx = j;
        }
      }
      // compare each record with max value to find the nearest neighbor
      if (z[i] < neighbors[max_idx].dist) {
        sandbox[(i + 1) * REC_LENGTH - 1] = '\0';
        strcpy(neighbors[max_idx].entry, sandbox + i * REC_LENGTH);
        neighbors[max_idx].dist = z[i];
      }
    }
  } // End while loop

  if (getenv("OUTPUT")) {
    FILE *out = fopen("output.txt", "w");
    fprintf(out, "The %d nearest neighbors are:\n", k);
    for (j = k - 1; j >= 0; j--) {
      if (!(neighbors[j].dist == OPEN))
        fprintf(out, "%s --> %f\n", neighbors[j].entry, neighbors[j].dist);
    }
    fclose(out);
  }
  fclose(flist);
  free(z);         // was leaked
  free(neighbors); // was leaked
  return 0;
}
lis_precon_jacobi.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#ifdef USE_SSE2
#include <emmintrin.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"

#undef __FUNC__
#define __FUNC__ "lis_precon_create_jacobi"
/* Build the (point) Jacobi preconditioner for solver->A:
 * precon->D is allocated to match the solver precision, filled with
 * diag(A), then inverted elementwise, so precon->D = 1 ./ diag(A). */
LIS_INT lis_precon_create_jacobi(LIS_SOLVER solver, LIS_PRECON precon)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

	if( solver->precision==LIS_PRECISION_DEFAULT )
	{
		err = lis_vector_duplicate(solver->A, &precon->D);
	}
	else
	{
		/* quad-precision solve: D carries the extra low-order words */
		err = lis_vector_duplicateex(LIS_PRECISION_QUAD,solver->A, &precon->D);
	}
	if( err )
	{
		return err;
	}

	lis_matrix_get_diagonal(solver->A, precon->D);
	lis_vector_reciprocal(precon->D);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_psolve_jacobi"
/* Apply the Jacobi preconditioner: solve Mx = b with M = D, i.e.
 * x[i] = b[i] * (1/diag(A))[i] elementwise.  The quad-precision branch
 * performs the same product with double-double arithmetic macros. */
LIS_INT lis_psolve_jacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
	LIS_INT i,n;
	LIS_SCALAR *b,*x,*d;
	LIS_PRECON precon;
	LIS_QUAD_DECLAR;
	#ifdef USE_QUAD_PRECISION
		LIS_SCALAR *xl;
	#endif

	LIS_DEBUG_FUNC_IN;

	/*
	 *  Mx = b
	 *  M  = D
	 */

	precon = solver->precon;
	n = precon->D->n;
	d = precon->D->value;
	b = B->value;
	x = X->value;
	#ifdef USE_QUAD_PRECISION
		xl = X->value_lo;
	#endif

	#ifdef USE_QUAD_PRECISION
	if( B->precision==LIS_PRECISION_DEFAULT )
	{
	#endif
		#ifdef _OPENMP
		#pragma omp parallel for private(i)
		#endif
		for(i=0; i<n; i++)
		{
			x[i] = b[i] * d[i];
		}
	#ifdef USE_QUAD_PRECISION
	}
	else
	{
		/* the private lists below name the temporaries used inside the
		 * LIS_QUAD_MULD / LIS_QUAD_MULD_SSE2 macros (via LIS_QUAD_DECLAR) */
		#ifdef _OPENMP
			#ifndef USE_SSE2
			#pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
			#else
			#pragma omp parallel for private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
			#endif
		#endif
		for(i=0; i<n; i++)
		{
			#ifndef USE_SSE2
				LIS_QUAD_MULD(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
			#else
				LIS_QUAD_MULD_SSE2(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
			#endif
			/* x[i] = b[i] * d[i]; */
		}
	}
	#endif

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_psolvet_jacobi"
/* Transpose-apply of the Jacobi preconditioner.  The body is identical
 * to lis_psolve_jacobi; presumably because M = D is diagonal, M^T = M. */
LIS_INT lis_psolvet_jacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
	LIS_INT i,n;
	LIS_SCALAR *b,*x,*d;
	LIS_PRECON precon;
	LIS_QUAD_DECLAR;
	#ifdef USE_QUAD_PRECISION
		LIS_SCALAR *xl;
	#endif

	LIS_DEBUG_FUNC_IN;

	/*
	 *  Mx = b
	 *  M  = D
	 */

	precon = solver->precon;
	n = precon->D->n;
	d = precon->D->value;
	b = B->value;
	x = X->value;
	#ifdef USE_QUAD_PRECISION
		xl = X->value_lo;
	#endif

	#ifdef USE_QUAD_PRECISION
	if( B->precision==LIS_PRECISION_DEFAULT )
	{
	#endif
		#ifdef _OPENMP
		#pragma omp parallel for private(i)
		#endif
		for(i=0; i<n; i++)
		{
			x[i] = b[i] * d[i];
		}
	#ifdef USE_QUAD_PRECISION
	}
	else
	{
		#ifdef _OPENMP
			#ifndef USE_SSE2
			#pragma omp parallel for private(i,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
			#else
			#pragma omp parallel for private(i,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
			#endif
		#endif
		for(i=0; i<n; i++)
		{
			#ifndef USE_SSE2
				LIS_QUAD_MULD(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
			#else
				LIS_QUAD_MULD_SSE2(x[i],xl[i],B->value[i],B->value_lo[i],d[i]);
			#endif
			/* x[i] = b[i] * d[i]; */
		}
	}
	#endif

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_precon_create_bjacobi"
/* Build the block-Jacobi preconditioner.  Falls back to point Jacobi
 * when A has no block structure; otherwise splits A, duplicates its
 * block diagonal into precon->WD and inverts it in place. */
LIS_INT lis_precon_create_bjacobi(LIS_SOLVER solver, LIS_PRECON precon)
{
	LIS_INT err;
	LIS_MATRIX A;

	LIS_DEBUG_FUNC_IN;

	A = solver->A;
	err = lis_matrix_convert_self(solver);
	if( err ) return err;

	if( !A->is_block )
	{
		/* no block structure: downgrade the option and delegate */
		solver->options[LIS_OPTIONS_PRECON] = LIS_PRECON_TYPE_JACOBI;
		precon->precon_type = LIS_PRECON_TYPE_JACOBI;
		err = lis_precon_create_jacobi(solver,precon);
		return err;
	}

	err = lis_matrix_split(A);
	if( err ) return err;
	err = lis_matrix_diag_duplicate(A->D,&precon->WD);
	if( err ) return err;
	lis_matrix_diag_copy(A->D,precon->WD);
	lis_matrix_diag_inverse(precon->WD);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_psolve_bjacobi"
/* Apply the block-Jacobi preconditioner: X = WD * B, where WD holds the
 * inverted block diagonal built by lis_precon_create_bjacobi. */
LIS_INT lis_psolve_bjacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
	LIS_PRECON precon;

	LIS_DEBUG_FUNC_IN;

	/*
	 *  Mx = b
	 *  M  = D
	 */

	precon = solver->precon;
	lis_matrix_diag_matvec(precon->WD,B,X);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_psolvet_bjacobi"
/* Transpose-apply of the block-Jacobi preconditioner: uses the
 * transposed block-diagonal product (lis_matrix_diag_matvect). */
LIS_INT lis_psolvet_bjacobi(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
	LIS_PRECON precon;

	LIS_DEBUG_FUNC_IN;

	/*
	 *  Mx = b
	 *  M  = D
	 */

	precon = solver->precon;
	lis_matrix_diag_matvect(precon->WD,B,X);

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}
libdecimate.c
/* * This file is part of the libdecimate library * (https://github.com/zindy/libdecimate) * Copyright (c) 2017 Egor Zindy * * libdecimate is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, version 3. * * libdecimate is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdlib.h> #include <stdint.h> #include <errno.h> #include <math.h> #include <omp.h> #include "libdecimate.h" //takes three arrays (RGB) and a max value, returns a Nx3 list of unique RGB values void decimate( uint16_t *ArrayR, int ZdimR, int YdimR, int XdimR, uint16_t *ArrayG, int ZdimG, int YdimG, int XdimG, uint16_t *ArrayB, int ZdimB, int YdimB, int XdimB, int input_bit, int output_bit, uint16_t **ArrayOut, int *YdimOut, int *XdimOut) { uint64_t i,j,n=0; //output array uint16_t* decomp = NULL; int decimation = input_bit - output_bit; decimation = (decimation > 0)?decimation:0; uint64_t max_out = pow(2,output_bit)-1; uint64_t signature, temp_r, temp_g, temp_b, max_signature=0; //This is the truth array that holds all three RGB values uint8_t* truth = NULL; if ( ZdimR != ZdimG || ZdimR != ZdimB || YdimR != YdimG || YdimR != YdimB || XdimR != XdimG || XdimR != XdimB) { errno = E2BIG; goto end; } //The truth table needs to encompass all the values from 0 to 2**outputbits in all three dimensions. truth = (uint8_t *)calloc(pow(2,(output_bit*3)), sizeof(uint8_t)); if (truth == NULL) { errno = ENOMEM; goto end; } //The strategy here is to fill truth array if we have a hit. Simultaneously, we could have multiple threads hitting the same //truth index. 
So don't count the number of hits as part of this loop, we can do that afterwards #pragma omp parallel for \ default(shared) private(temp_r, temp_g, temp_b, signature) for (i=0; i<ZdimR*YdimR*XdimR; i++) { temp_r = (uint64_t)ArrayR[i] >> decimation; if (temp_r > max_out) temp_r = max_out; temp_g = (uint64_t)ArrayG[i] >> decimation; if (temp_g > max_out) temp_g = max_out; temp_b = (uint64_t)ArrayB[i] >> decimation; if (temp_b > max_out) temp_b = max_out; //construct the rgb signature signature = temp_r | (temp_g << output_bit) | (temp_b << (output_bit*2)); if (truth[signature] == 1) continue; if (signature > max_signature) { max_signature = signature; } //printf("signature=%d max_sig=%d\n",signature,max_signature); truth[signature] = 1; } //we need to count the total number of hits, this is to determine the size of the output array (nx3). n = 0; #pragma omp parallel for \ default(shared) reduction( + : n ) for (i=0; i<=max_signature; i++) { if (truth[i] > 0) n++; } //output is 16 bit (up to) //could definitely combine these both into a single realloc. This just reminds me what is going on. 
if (*ArrayOut == NULL) { decomp = (uint16_t *)malloc(3*n*sizeof(uint16_t)); } else { decomp = (uint16_t *)realloc(*ArrayOut, 3*n*sizeof(uint16_t)); } if (decomp == NULL) { errno = ENOMEM; goto end; } //use this as a mask of width output_bit to unpack the index back into 3 r,g,b values signature = pow(2,output_bit) - 1; j = 0; for (i=0; i<=max_signature; i++) { if (truth[i] > 0) { //printf("i=%d j=%d truth[i]=%d\n",i,j,truth[i]); temp_r = i & signature; temp_g = (i >> output_bit) & signature; temp_b = (i >> (output_bit*2)) & signature; decomp[j++] = (uint16_t)temp_r; decomp[j++] = (uint16_t)temp_g; decomp[j++] = (uint16_t)temp_b; truth[i] = 0; } } end: if (truth != NULL) free(truth); *ArrayOut = decomp; *YdimOut = n; *XdimOut = 3; } //This re void decimate_indexes( uint16_t *ArrayR, int ZdimR, int YdimR, int XdimR, uint16_t *ArrayG, int ZdimG, int YdimG, int XdimG, uint16_t *ArrayB, int ZdimB, int YdimB, int XdimB, int input_bit, int output_bit, uint64_t **ArrayOut, int *NdimOut) { uint64_t i,j,n=0; //output array uint64_t* decomp = NULL; int decimation = input_bit - output_bit; decimation = (decimation > 0)?decimation:0; uint64_t max_out = pow(2,output_bit)-1; uint64_t signature, temp_r, temp_g, temp_b, max_signature=0; //This is the truth array that holds all three RGB values uint8_t* truth = NULL; uint64_t* truth_index = NULL; if ( ZdimR != ZdimG || ZdimR != ZdimB || YdimR != YdimG || YdimR != YdimB || XdimR != XdimG || XdimR != XdimB) { errno = E2BIG; goto end; } //The truth table needs to encompass all the values from 0 to 2**outputbits in all three dimensions. truth = (uint8_t *)calloc(pow(2,(output_bit*3)), sizeof(uint8_t)); truth_index = (uint64_t *)malloc(pow(2,(output_bit*3))*sizeof(uint64_t)); if (truth == NULL || truth_index == NULL) { errno = ENOMEM; goto end; } //The strategy here is to fill truth array if we have a hit. Simultaneously, we could have multiple threads hitting the same //truth index. 
So don't count the number of hits as part of this loop, we can do that afterwards #pragma omp parallel for \ default(shared) private(temp_r, temp_g, temp_b, signature) for (i=0; i<ZdimR*YdimR*XdimR; i++) { temp_r = (uint64_t)ArrayR[i] >> decimation; if (temp_r > max_out) temp_r = max_out; temp_g = (uint64_t)ArrayG[i] >> decimation; if (temp_g > max_out) temp_g = max_out; temp_b = (uint64_t)ArrayB[i] >> decimation; if (temp_b > max_out) temp_b = max_out; //construct the rgb signature signature = temp_r | (temp_g << output_bit) | (temp_b << (output_bit*2)); if (truth[signature] == 1) continue; if (signature > max_signature) { max_signature = signature; } truth[signature] = 1; truth_index[signature] = i; //printf("signature=%d max_sig=%d i=%d truth_index[signature]=%d\n",signature,max_signature,i, truth_index[signature]); } //we need to count the total number of hits, this is to determine the size of the output array (nx3). n = 0; #pragma omp parallel for \ default(shared) reduction( + : n ) for (i=0; i<=max_signature; i++) { if (truth[i] > 0) n++; } //output is 16 bit (up to) //could definitely combine these both into a single realloc. This just reminds me what is going on. if (*ArrayOut == NULL) { decomp = (uint64_t *)malloc(n*sizeof(uint64_t)); } else { decomp = (uint64_t *)realloc(*ArrayOut, n*sizeof(uint64_t)); } if (decomp == NULL) { errno = ENOMEM; goto end; } j = 0; for (i=0; i<=max_signature; i++) { if (truth[i] > 0) { //printf("i=%d j=%d truth[i]=%d truth_index[i]=%d\n",i,j,truth[i],truth_index[i]); decomp[j] = truth_index[i]; truth[i] = 0; j++; } } end: if (truth != NULL) free(truth); if (truth_index != NULL) free(truth_index); *ArrayOut = decomp; *NdimOut = n; }
convolution_1x1_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch) { // interleave // src = inch-outch // dst = 8b-8a-inch/8a-outch/8b kernel_tm_pack8.create(1, inch / 8, outch / 8, (size_t)2u * 64, 64); int q = 0; for (; q + 7 < outch; q += 8) { const float* k0 = (const float*)kernel + (q + 0) * inch; const float* k1 = (const float*)kernel + (q + 1) * inch; const float* k2 = (const float*)kernel + (q + 2) * inch; const float* k3 = (const float*)kernel + (q + 3) * inch; const float* k4 = (const float*)kernel + (q + 4) * inch; const float* k5 = (const float*)kernel + (q + 5) * inch; const float* k6 = (const float*)kernel + (q + 6) * inch; const float* k7 = (const float*)kernel + (q + 7) * inch; __fp16* g0 = kernel_tm_pack8.channel(q / 8); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { g0[0] = (__fp16)k0[i]; g0[1] = (__fp16)k1[i]; g0[2] = (__fp16)k2[i]; g0[3] = (__fp16)k3[i]; g0[4] = (__fp16)k4[i]; g0[5] = (__fp16)k5[i]; g0[6] = (__fp16)k6[i]; g0[7] = (__fp16)k7[i]; g0 += 8; } k0 += 8; k1 += 8; k2 += 8; k3 += 8; k4 += 8; k5 += 8; k6 += 8; k7 += 8; } } } static void conv1x1s1_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) 
{ int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const __fp16* bias = _bias; // interleave Mat tmp; if (size >= 12) tmp.create(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator); else if (size >= 8) tmp.create(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator); else if (size >= 2) tmp.create(2, inch, size / 2 + size % 2, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); { int nn_size; int remain_size_start; nn_size = size / 12; remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 12; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 
\n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); img0 += bottom_blob.cstep * 8; } } nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", 
"v3"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += bottom_blob.cstep * 8; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 8 : zeros; int i = 0; for (; i + 11 < size; i += 12) { __fp16* tmpptr = tmp.channel(i / 12); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v20.8h}, [%8] \n" "mov v21.16b, v20.16b \n" "mov v22.16b, v20.16b \n" "mov v23.16b, v20.16b \n" "mov v24.16b, v20.16b \n" "mov v25.16b, v20.16b \n" "mov v26.16b, v20.16b \n" "mov v27.16b, v20.16b \n" "mov v28.16b, v20.16b \n" "mov v29.16b, v20.16b \n" "mov v30.16b, v20.16b \n" "mov v31.16b, v20.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123 "fmla v20.8h, v12.8h, v0.h[0] \n" "fmla v21.8h, v12.8h, v0.h[1] \n" "fmla v22.8h, v12.8h, v0.h[2] \n" "fmla v23.8h, v12.8h, v0.h[3] \n" "fmla v24.8h, v12.8h, v0.h[4] \n" "fmla v25.8h, v12.8h, v0.h[5] \n" "fmla v26.8h, v12.8h, v0.h[6] \n" "fmla v27.8h, v12.8h, v0.h[7] \n" "fmla v28.8h, v12.8h, v1.h[0] \n" "fmla v29.8h, v12.8h, v1.h[1] \n" "fmla v30.8h, v12.8h, v1.h[2] \n" "fmla v31.8h, v12.8h, v1.h[3] \n" "fmla v20.8h, v13.8h, v1.h[4] \n" "fmla v21.8h, v13.8h, v1.h[5] \n" "fmla v22.8h, v13.8h, v1.h[6] \n" "fmla v23.8h, v13.8h, v1.h[7] \n" "fmla v24.8h, v13.8h, v2.h[0] \n" "fmla v25.8h, v13.8h, v2.h[1] \n" "fmla v26.8h, v13.8h, v2.h[2] \n" "fmla v27.8h, v13.8h, v2.h[3] \n" "fmla v28.8h, v13.8h, v2.h[4] \n" "fmla v29.8h, v13.8h, v2.h[5] \n" "fmla v30.8h, v13.8h, v2.h[6] \n" "fmla v31.8h, v13.8h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v20.8h, v14.8h, v3.h[0] \n" "fmla v21.8h, v14.8h, v3.h[1] \n" "fmla v22.8h, v14.8h, v3.h[2] \n" "fmla v23.8h, v14.8h, v3.h[3] \n" "fmla v24.8h, v14.8h, v3.h[4] \n" "fmla v25.8h, v14.8h, v3.h[5] \n" "fmla v26.8h, v14.8h, v3.h[6] \n" "fmla v27.8h, v14.8h, v3.h[7] \n" "fmla v28.8h, v14.8h, v4.h[0] \n" "fmla v29.8h, v14.8h, v4.h[1] \n" "fmla v30.8h, v14.8h, v4.h[2] \n" "fmla v31.8h, v14.8h, 
v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567 "fmla v20.8h, v15.8h, v4.h[4] \n" "fmla v21.8h, v15.8h, v4.h[5] \n" "fmla v22.8h, v15.8h, v4.h[6] \n" "fmla v23.8h, v15.8h, v4.h[7] \n" "fmla v24.8h, v15.8h, v5.h[0] \n" "fmla v25.8h, v15.8h, v5.h[1] \n" "fmla v26.8h, v15.8h, v5.h[2] \n" "fmla v27.8h, v15.8h, v5.h[3] \n" "fmla v28.8h, v15.8h, v5.h[4] \n" "fmla v29.8h, v15.8h, v5.h[5] \n" "fmla v30.8h, v15.8h, v5.h[6] \n" "fmla v31.8h, v15.8h, v5.h[7] \n" "fmla v20.8h, v16.8h, v6.h[0] \n" "fmla v21.8h, v16.8h, v6.h[1] \n" "fmla v22.8h, v16.8h, v6.h[2] \n" "fmla v23.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v16.8h, v6.h[4] \n" "fmla v25.8h, v16.8h, v6.h[5] \n" "fmla v26.8h, v16.8h, v6.h[6] \n" "fmla v27.8h, v16.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v7.h[0] \n" "fmla v29.8h, v16.8h, v7.h[1] \n" "fmla v30.8h, v16.8h, v7.h[2] \n" "fmla v31.8h, v16.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011 "fmla v20.8h, v17.8h, v7.h[4] \n" "fmla v21.8h, v17.8h, v7.h[5] \n" "fmla v22.8h, v17.8h, v7.h[6] \n" "fmla v23.8h, v17.8h, v7.h[7] \n" "fmla v24.8h, v17.8h, v8.h[0] \n" "fmla v25.8h, v17.8h, v8.h[1] \n" "fmla v26.8h, v17.8h, v8.h[2] \n" "fmla v27.8h, v17.8h, v8.h[3] \n" "fmla v28.8h, v17.8h, v8.h[4] \n" "fmla v29.8h, v17.8h, v8.h[5] \n" "fmla v30.8h, v17.8h, v8.h[6] \n" "fmla v31.8h, v17.8h, v8.h[7] \n" "fmla v20.8h, v18.8h, v9.h[0] \n" "fmla v21.8h, v18.8h, v9.h[1] \n" "fmla v22.8h, v18.8h, v9.h[2] \n" "fmla v23.8h, v18.8h, v9.h[3] \n" "fmla v24.8h, v18.8h, v9.h[4] \n" "fmla v25.8h, v18.8h, v9.h[5] \n" "fmla v26.8h, v18.8h, v9.h[6] \n" "fmla v27.8h, v18.8h, v9.h[7] \n" "fmla v28.8h, v18.8h, v10.h[0] \n" "fmla v29.8h, v18.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v10.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.8h, v19.8h, v10.h[4] \n" "fmla v21.8h, v19.8h, v10.h[5] \n" "fmla v22.8h, v19.8h, v10.h[6] \n" "fmla v23.8h, v19.8h, v10.h[7] \n" 
"fmla v24.8h, v19.8h, v11.h[0] \n" "fmla v25.8h, v19.8h, v11.h[1] \n" "fmla v26.8h, v19.8h, v11.h[2] \n" "fmla v27.8h, v19.8h, v11.h[3] \n" "fmla v28.8h, v19.8h, v11.h[4] \n" "fmla v29.8h, v19.8h, v11.h[5] \n" "fmla v30.8h, v19.8h, v11.h[6] \n" "fmla v31.8h, v19.8h, v11.h[7] \n" "bne 0b \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "mov v20.16b, v16.16b \n" "mov v21.16b, v16.16b \n" "mov v22.16b, v16.16b \n" "mov v23.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v0.h[1] \n" "fmla v18.8h, v8.8h, v0.h[2] \n" "fmla v19.8h, v8.8h, v0.h[3] \n" "fmla v20.8h, v8.8h, v0.h[4] \n" "fmla v21.8h, v8.8h, v0.h[5] \n" "fmla v22.8h, v8.8h, v0.h[6] \n" "fmla v23.8h, v8.8h, v0.h[7] \n" "fmla v16.8h, v9.8h, v1.h[0] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v1.h[2] \n" "fmla v19.8h, v9.8h, v1.h[3] \n" "fmla v20.8h, v9.8h, v1.h[4] \n" "fmla v21.8h, v9.8h, v1.h[5] \n" "fmla v22.8h, v9.8h, v1.h[6] \n" "fmla v23.8h, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, 
[%2], #64 \n" // r4567 "fmla v16.8h, v10.8h, v2.h[0] \n" "fmla v17.8h, v10.8h, v2.h[1] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v2.h[3] \n" "fmla v20.8h, v10.8h, v2.h[4] \n" "fmla v21.8h, v10.8h, v2.h[5] \n" "fmla v22.8h, v10.8h, v2.h[6] \n" "fmla v23.8h, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v11.8h, v3.h[0] \n" "fmla v17.8h, v11.8h, v3.h[1] \n" "fmla v18.8h, v11.8h, v3.h[2] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v20.8h, v11.8h, v3.h[4] \n" "fmla v21.8h, v11.8h, v3.h[5] \n" "fmla v22.8h, v11.8h, v3.h[6] \n" "fmla v23.8h, v11.8h, v3.h[7] \n" "fmla v16.8h, v12.8h, v4.h[0] \n" "fmla v17.8h, v12.8h, v4.h[1] \n" "fmla v18.8h, v12.8h, v4.h[2] \n" "fmla v19.8h, v12.8h, v4.h[3] \n" "fmla v20.8h, v12.8h, v4.h[4] \n" "fmla v21.8h, v12.8h, v4.h[5] \n" "fmla v22.8h, v12.8h, v4.h[6] \n" "fmla v23.8h, v12.8h, v4.h[7] \n" "fmla v16.8h, v13.8h, v5.h[0] \n" "fmla v17.8h, v13.8h, v5.h[1] \n" "fmla v18.8h, v13.8h, v5.h[2] \n" "fmla v19.8h, v13.8h, v5.h[3] \n" "fmla v20.8h, v13.8h, v5.h[4] \n" "fmla v21.8h, v13.8h, v5.h[5] \n" "fmla v22.8h, v13.8h, v5.h[6] \n" "fmla v23.8h, v13.8h, v5.h[7] \n" "fmla v16.8h, v14.8h, v6.h[0] \n" "fmla v17.8h, v14.8h, v6.h[1] \n" "fmla v18.8h, v14.8h, v6.h[2] \n" "fmla v19.8h, v14.8h, v6.h[3] \n" "fmla v20.8h, v14.8h, v6.h[4] \n" "fmla v21.8h, v14.8h, v6.h[5] \n" "fmla v22.8h, v14.8h, v6.h[6] \n" "fmla v23.8h, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v7.h[0] \n" "fmla v17.8h, v15.8h, v7.h[1] \n" "fmla v18.8h, v15.8h, v7.h[2] \n" "fmla v19.8h, v15.8h, v7.h[3] \n" "fmla v20.8h, v15.8h, v7.h[4] \n" "fmla v21.8h, v15.8h, v7.h[5] \n" "fmla v22.8h, v15.8h, v7.h[6] \n" "fmla v23.8h, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), 
"2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v18.8h, v8.8h, v2.h[0] \n" "fmla v19.8h, v8.8h, v3.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v2.h[1] \n" "fmla v19.8h, v9.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v3.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v18.8h, v11.8h, v2.h[3] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v18.8h, v12.8h, v2.h[4] \n" "fmla v19.8h, v12.8h, v3.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v18.8h, v13.8h, v2.h[5] \n" "fmla v19.8h, v13.8h, v3.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "fmla v18.8h, v14.8h, v2.h[6] \n" "fmla v19.8h, v14.8h, v3.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "fmla v18.8h, v15.8h, v2.h[7] \n" "fmla v19.8h, v15.8h, v3.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" : "=r"(nn), // %0 
"=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 1 < size; i += 2) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8h}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 
{v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "bne 0b \n" "st1 {v16.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const __fp16 bias0 = bias ? bias[p] : 0.f; // // __fp16* outptr0 = out0; // // for (int i=0; i<size; i++) // { // __fp16 sum = bias0; // // const __fp16* kptr = _kernel.channel(p); // // for (int q=0; q<inch; q++) // { // const __fp16* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); float16x8_t _v2 = vld1q_f16(r0 
+ 32); float16x8_t _v3 = vld1q_f16(r0 + 48); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); vst1q_f16(outptr + 16, _v2); vst1q_f16(outptr + 24, _v3); r0 += 64; outptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); r0 += 32; outptr += 16; } for (; j < outw; j++) { float16x8_t _v = vld1q_f16(r0); vst1q_f16(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_pack8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
sections-2.c
/* { dg-do run } */

#include <stdlib.h>
#include <unistd.h>

/* Sleep four seconds, then terminate the whole process with success.
   noinline/noclone keep the compiler from inlining or specializing the
   call away; noreturn records that exit() never comes back.  */
__attribute__((noinline, noclone, noreturn)) void
foo ()
{
  sleep (4);
  exit (0);
}

int
main ()
{
#pragma omp parallel
  {
#pragma omp sections
    {
      /* The first structured block is an implicit first section.  */
      foo ();
#pragma omp section
      foo ();
#pragma omp section
      foo ();
    }
  }
  /* Unreachable: every section calls foo(), which exits the process
     with status 0.  The test passes by exiting cleanly from foo().  */
  return 0;
}
memBw.c
/** $lic$ * Copyright (C) 2016-2017 by The Board of Trustees of Cornell University * Copyright (C) 2013-2016 by The Board of Trustees of Stanford University * * This file is part of iBench. * * iBench is free software; you can redistribute it and/or modify it under the * terms of the Modified BSD-3 License as published by the Open Source Initiative. * * If you use this software in your research, we request that you reference * the iBench paper ("iBench: Quantifying Interference for Datacenter Applications", * Delimitrou and Kozyrakis, IISWC'13, September 2013) as the source of the benchmark * suite in any publications that use this software, and that * you send us a citation of your work. * * iBench is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the BSD-3 License for more details. * * You should have received a copy of the Modified BSD-3 License along with * this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>. 
**/ #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <unistd.h> #include <time.h> #include <math.h> #include <float.h> #ifndef N # define N 2000000 #endif #ifndef NTIMES # define NTIMES 100 #endif #ifndef OFFSET # define OFFSET 0 #endif static double bwData[N+OFFSET], b[N+OFFSET], c[N+OFFSET]; unsigned int bwStreamSize = 2*N; //#ifdef _OPENMP extern int omp_get_num_threads(); //#endif int main (int argc, char **argv) { //#ifdef _OPENMP //#pragma omp parallel // { //#pragma omp master // { // register int k = omp_get_num_threads(); // printf ("Number of Threads requested = %i\n",k); // } // } //#endif double scalar = 3.0; /*Usage: ./memBw <duration in sec>*/ if (argc < 2) { printf("Usage: ./memBw <duration in sec>\n"); exit(0); } unsigned int usr_timer = atoi(argv[1]); double time_spent = 0.0; while (time_spent<usr_timer) { double *mid=bwData+(bwStreamSize/2); clock_t begin = clock(); #pragma omp parallel for for (int i=0; i<bwStreamSize/2; i++) { bwData[i]= scalar*mid[i]; } #pragma omp parallel for for (int i=0; i<bwStreamSize/2; i++) { mid[i]= scalar*bwData[i]; } clock_t end = clock(); time_spent += (double)(end - begin) / CLOCKS_PER_SEC; } return 0; }
exchange_boundary.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// perform a (intra-level) ghost zone exchange
//  NOTE exchange_boundary() only exchanges the boundary.
//  It will not enforce any boundary conditions
//  BC's are either the responsibility of a separate function or should be fused into the stencil
//
// Exchange ghost-zone values of grid component `id` between boxes on the same
// level.  justFaces selects which precomputed communication pattern in
// level->exchange_ghosts[] is used (normalized below to 0 or 1).  The phases
// are: prepost Irecv's, pack send buffers (blocks[0]), post Isend's, perform
// purely local box-to-box copies (blocks[1]) to hide Isend latency, wait for
// all MPI messages, then unpack receive buffers (blocks[2]).  Each phase's
// cycle count is accumulated into level->cycles for profiling.
void exchange_boundary(level_type * level, int id, int justFaces){
  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;
  // tag encodes the level's tag plus the faces-only flag so concurrent
  // exchanges on different levels/patterns don't cross-match
  int my_tag = (level->tag<<4) | justFaces;
  int buffer=0;
  int n;
  if(justFaces)justFaces=1;else justFaces=0; // must be 0 or 1 in order to index into exchange_ghosts[]

  #ifdef USE_MPI
  int nMessages = level->exchange_ghosts[justFaces].num_recvs + level->exchange_ghosts[justFaces].num_sends;
  // requests[] holds recvs first, then sends
  MPI_Request *recv_requests = level->exchange_ghosts[justFaces].requests;
  MPI_Request *send_requests = level->exchange_ghosts[justFaces].requests + level->exchange_ghosts[justFaces].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level->exchange_ghosts[justFaces].num_recvs;n++){
    MPI_Irecv(level->exchange_ghosts[justFaces].recv_buffers[n],
              level->exchange_ghosts[justFaces].recv_sizes[n],
              MPI_DOUBLE,
              level->exchange_ghosts[justFaces].recv_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &recv_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers... (blocks[0] describes the box->buffer copies)
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[justFaces].num_blocks[0])
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[0];buffer++){
    CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[0][buffer]);
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level->exchange_ghosts[justFaces].num_sends;n++){
    MPI_Isend(level->exchange_ghosts[justFaces].send_buffers[n],
              level->exchange_ghosts[justFaces].send_sizes[n],
              MPI_DOUBLE,
              level->exchange_ghosts[justFaces].send_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &send_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_send += (_timeEnd-_timeStart);
  #endif

  // exchange locally... try and hide within Isend latency...
  // (blocks[1] are same-process box-to-box ghost copies; no MPI involved)
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[justFaces].num_blocks[1])
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[1];buffer++){
    CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[1][buffer]);
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level->exchange_ghosts[justFaces].requests,level->exchange_ghosts[justFaces].status);
  _timeEnd = CycleTime();
  level->cycles.ghostZone_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers (blocks[2]: buffer->box copies)
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level,buffer,level->exchange_ghosts[justFaces].num_blocks[2])
  for(buffer=0;buffer<level->exchange_ghosts[justFaces].num_blocks[2];buffer++){
    CopyBlock(level,id,&level->exchange_ghosts[justFaces].blocks[2][buffer]);
  }
  _timeEnd = CycleTime();
  level->cycles.ghostZone_unpack += (_timeEnd-_timeStart);
  #endif

  level->cycles.ghostZone_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
HYPRE_parcsr_pcg.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE.  See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

#include "_hypre_parcsr_ls.h"

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGCreate
 *--------------------------------------------------------------------------*/

/* Create a PCG solver object specialized for ParCSR matrices by binding the
   generic Krylov driver to the ParKrylov vector/matvec kernels.  Writes the
   new solver into *solver; flags an argument error if solver is NULL. */
HYPRE_Int
HYPRE_ParCSRPCGCreate( MPI_Comm comm, HYPRE_Solver *solver )
{
   hypre_PCGFunctions * pcg_functions;

   if (!solver)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   pcg_functions =
      hypre_PCGFunctionsCreate(
         hypre_CAlloc, hypre_ParKrylovFree, hypre_ParKrylovCommInfo,
         hypre_ParKrylovCreateVector,
         hypre_ParKrylovDestroyVector, hypre_ParKrylovMatvecCreate,
         hypre_ParKrylovMatvec, hypre_ParKrylovMatvecDestroy,
         hypre_ParKrylovInnerProd, hypre_ParKrylovCopyVector,
         hypre_ParKrylovClearVector,
         hypre_ParKrylovScaleVector, hypre_ParKrylovAxpy,
         hypre_ParKrylovIdentitySetup, hypre_ParKrylovIdentity );
   *solver = ( (HYPRE_Solver) hypre_PCGCreate( pcg_functions ) );

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGDestroy
 *--------------------------------------------------------------------------*/

/* Release a solver created by HYPRE_ParCSRPCGCreate. */
HYPRE_Int
HYPRE_ParCSRPCGDestroy( HYPRE_Solver solver )
{
   return( hypre_PCGDestroy( (void *) solver ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetup
 *--------------------------------------------------------------------------*/

/* Thin cast-through wrapper onto the generic HYPRE_PCGSetup. */
HYPRE_Int
HYPRE_ParCSRPCGSetup( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   return( HYPRE_PCGSetup( solver,
                           (HYPRE_Matrix) A,
                           (HYPRE_Vector) b,
                           (HYPRE_Vector) x ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSolve
 *--------------------------------------------------------------------------*/

/* Thin cast-through wrapper onto the generic HYPRE_PCGSolve. */
HYPRE_Int
HYPRE_ParCSRPCGSolve( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   return( HYPRE_PCGSolve( solver,
                           (HYPRE_Matrix) A,
                           (HYPRE_Vector) b,
                           (HYPRE_Vector) x ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetTol
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetTol( HYPRE_Solver solver,
                       HYPRE_Real   tol )
{
   return( HYPRE_PCGSetTol( solver, tol ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetAbsoluteTol
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetAbsoluteTol( HYPRE_Solver solver,
                               HYPRE_Real   a_tol )
{
   return( HYPRE_PCGSetAbsoluteTol( solver, a_tol ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetMaxIter
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetMaxIter( HYPRE_Solver solver,
                           HYPRE_Int    max_iter )
{
   return( HYPRE_PCGSetMaxIter( solver, max_iter ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetStopCrit
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetStopCrit( HYPRE_Solver solver,
                            HYPRE_Int    stop_crit )
{
   return( HYPRE_PCGSetStopCrit( solver, stop_crit ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetTwoNorm
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetTwoNorm( HYPRE_Solver solver,
                           HYPRE_Int    two_norm )
{
   return( HYPRE_PCGSetTwoNorm( solver, two_norm ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetRelChange
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetRelChange( HYPRE_Solver solver,
                             HYPRE_Int    rel_change )
{
   return( HYPRE_PCGSetRelChange( solver, rel_change ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetPrecond
 *--------------------------------------------------------------------------*/

/* Install a preconditioner; ParCSR-typed callbacks are cast to the generic
   solver-function type expected by the Krylov layer. */
HYPRE_Int
HYPRE_ParCSRPCGSetPrecond( HYPRE_Solver            solver,
                           HYPRE_PtrToParSolverFcn precond,
                           HYPRE_PtrToParSolverFcn precond_setup,
                           HYPRE_Solver            precond_solver )
{
   return( HYPRE_PCGSetPrecond( solver,
                                (HYPRE_PtrToSolverFcn) precond,
                                (HYPRE_PtrToSolverFcn) precond_setup,
                                precond_solver ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetPrecond
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetPrecond( HYPRE_Solver  solver,
                           HYPRE_Solver *precond_data_ptr )
{
   return( HYPRE_PCGGetPrecond( solver, precond_data_ptr ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetPrintLevel
 * an obsolete function; use HYPRE_PCG* functions instead
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetPrintLevel( HYPRE_Solver solver,
                              HYPRE_Int    level )
{
   return( HYPRE_PCGSetPrintLevel( solver, level ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetLogging
 * an obsolete function; use HYPRE_PCG* functions instead
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetLogging( HYPRE_Solver solver,
                           HYPRE_Int    level )
{
   return( HYPRE_PCGSetLogging( solver, level ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetNumIterations
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetNumIterations( HYPRE_Solver solver,
                                 HYPRE_Int   *num_iterations )
{
   return( HYPRE_PCGGetNumIterations( solver, num_iterations ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetFinalRelativeResidualNorm
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetFinalRelativeResidualNorm( HYPRE_Solver solver,
                                             HYPRE_Real  *norm )
{
   return( HYPRE_PCGGetFinalRelativeResidualNorm( solver, norm ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetResidual
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetResidual( HYPRE_Solver     solver,
                            HYPRE_ParVector *residual )
{
   return( HYPRE_PCGGetResidual( solver, (void *) residual ) );
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRDiagScaleSetup
 *--------------------------------------------------------------------------*/

/* Jacobi (diagonal) scaling needs no setup; intentionally a no-op. */
HYPRE_Int
HYPRE_ParCSRDiagScaleSetup( HYPRE_Solver solver,
                            HYPRE_ParCSRMatrix A,
                            HYPRE_ParVector y,
                            HYPRE_ParVector x )
{
   return 0;
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRDiagScale
 *--------------------------------------------------------------------------*/

/* Diagonal-scaling "preconditioner": x[i] = y[i] / diag(A)[i] over the local
   rows, dispatched to GPU, device-OpenMP, or host-OpenMP code paths depending
   on the build configuration.  A_i[i] indexes the diagonal entry because the
   diag CSR block stores the diagonal first in each row (HYPRE convention). */
HYPRE_Int
HYPRE_ParCSRDiagScale( HYPRE_Solver solver,
                       HYPRE_ParCSRMatrix HA,
                       HYPRE_ParVector Hy,
                       HYPRE_ParVector Hx )
{
   hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA;
   hypre_ParVector    *y = (hypre_ParVector *) Hy;
   hypre_ParVector    *x = (hypre_ParVector *) Hx;

   HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
   HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int  *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x));
   HYPRE_Int   ierr = 0;
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
   DiagScaleVector(x_data, y_data, A_data, A_i, local_size, HYPRE_STREAM(4));
#elif defined(HYPRE_USING_DEVICE_OPENMP) && defined(HYPRE_USING_UNIFIED_MEMORY)
   HYPRE_Int i;
#pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i)
   for (i=0; i < local_size; i++)
   {
      x_data[i] = y_data[i]/A_data[A_i[i]];
   }
#else
   HYPRE_Int i;
/* NOTE(review): `#if (HYPRE_USING_OPENMP)` evaluates false when the macro is
   undefined, but errors if it is defined empty; `#ifdef` is the usual form
   elsewhere in HYPRE -- confirm intent before changing. */
#if (HYPRE_USING_OPENMP)
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < local_size; i++)
   {
      x_data[i] = y_data[i]/A_data[A_i[i]];
   }
#endif
   return ierr;
}

/*--------------------------------------------------------------------------
 * HYPRE_ParCSRSymPrecondSetup
 *--------------------------------------------------------------------------*/

/* Commented-out, never-finished symmetric-preconditioner sketch; kept for
   reference only (note it reads several uninitialized locals as written). */
/*

HYPRE_Int
HYPRE_ParCSRSymPrecondSetup( HYPRE_Solver solver,
                             HYPRE_ParCSRMatrix A,
                             HYPRE_ParVector b,
                             HYPRE_ParVector x )
{
   hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) A;
   hypre_ParVector    *y = (hypre_ParVector *) b;
   hypre_ParVector    *x = (hypre_ParVector *) x;

   HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
   HYPRE_Real *A_diag = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   HYPRE_Real *A_offd = hypre_CSRMatrixData(hypre_ParCSRMatrixOffD(A));

   HYPRE_Int i, ierr = 0;
   hypre_ParCSRMatrix *Asym;
   MPI_Comm comm;
   HYPRE_Int global_num_rows;
   HYPRE_Int global_num_cols;
   HYPRE_Int *row_starts;
   HYPRE_Int *col_starts;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_nonzeros_diag;
   HYPRE_Int num_nonzeros_offd;

   Asym = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                   row_starts, col_starts, num_cols_offd,
                                   num_nonzeros_diag, num_nonzeros_offd);

   for (i=0; i < hypre_VectorSize(hypre_ParVectorLocalVector(x)); i++)
   {
      x_data[i] = y_data[i]/A_data[A_i[i]];
   }

   return ierr;
}
*/
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageBoundingBox() returns the bounding box of an image canvas.
%
%  The format of the GetImageBoundingBox method is:
%
%      RectangleInfo GetImageBoundingBox(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o bounds: Method GetImageBoundingBox returns the bounding box of an
%      image canvas.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Fraction (0..1) of pixels along each side of the canvas that match the
  detected background color.
*/
typedef struct _EdgeInfo
{
  double
    left,
    right,
    top,
    bottom;
} EdgeInfo;

/*
  Return the fraction of non-background pixels in one edge strip of the
  canvas.  The background color is sampled from the corner pixel selected by
  `gravity` and may be overridden by the "background" or
  "trim:background-color" artifacts.  A width x height strip at
  (x_offset,y_offset) is gravity-adjusted, cropped out, and scanned with
  fuzzy color comparison.  Returns 0.0 if the crop fails.
  NOTE(review): callers pass width or height of 0; presumably
  GravityAdjustGeometry/CropImage expand that to the full extent -- confirm.
*/
static double GetEdgeBackgroundCensus(const Image *image,
  const CacheView *image_view,const GravityType gravity,const size_t width,
  const size_t height,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    census;

  Image
    *edge_image;

  PixelInfo
    background,
    pixel;

  RectangleInfo
    edge_geometry;

  const Quantum
    *p;

  ssize_t
    y;

  /*
    Determine the percent of image background for this edge.
  */
  switch (gravity)
  {
    case NorthWestGravity:
    case NorthGravity:
    default:
    {
      /* sample the top-left corner as the background color */
      p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
      break;
    }
    case NorthEastGravity:
    case EastGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
        exception);
      break;
    }
    case SouthEastGravity:
    case SouthGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
        (ssize_t) image->rows-1,1,1,exception);
      break;
    }
    case SouthWestGravity:
    case WestGravity:
    {
      p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
        exception);
      break;
    }
  }
  GetPixelInfoPixel(image,p,&background);
  /* user-specified background overrides the sampled corner pixel */
  artifact=GetImageArtifact(image,"background");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  artifact=GetImageArtifact(image,"trim:background-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&background,exception);
  edge_geometry.width=width;
  edge_geometry.height=height;
  edge_geometry.x=x_offset;
  edge_geometry.y=y_offset;
  GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
  edge_image=CropImage(image,&edge_geometry,exception);
  if (edge_image == (Image *) NULL)
    return(0.0);
  census=0.0;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    ssize_t
      x;

    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      GetPixelInfoPixel(edge_image,p,&pixel);
      /* count pixels that do NOT match the background (within fuzz) */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        census++;
      p+=GetPixelChannels(edge_image);
    }
  }
  /* normalize the count to a fraction of the strip's area */
  census/=((double) edge_image->columns*edge_image->rows);
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  return(census);
}

/* Smallest of the four per-edge censuses: the edge most dominated by
   background, i.e. the next candidate for trimming. */
static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge)
{
  double
    census;

  census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
    edge->bottom);
  return(census);
}

/*
  Compute the bounding box by iteratively shaving off whichever edge is most
  background-like, until every remaining edge's non-background fraction meets
  the "trim:percent-background" threshold (or the box collapses).
*/
static RectangleInfo GetEdgeBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    background_census,
    percent_background;

  EdgeInfo
    edge,
    vertex;

  Image
    *edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  SetGeometry(image,&bounds);
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  /* vertex tracks how many rows/columns have been trimmed from each side */
  (void) memset(&vertex,0,sizeof(vertex));
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  /* convert "percent background allowed" into the non-background fraction
     each edge must reach before trimming stops */
  percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
    1.0);
  background_census=GetMinEdgeBackgroundCensus(&edge);
  for ( ; background_census < percent_background;
          background_census=GetMinEdgeBackgroundCensus(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_census) < MagickEpsilon)
      {
        /*
          Trim left edge.
*/ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[4], zero; RectangleInfo bounds; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); artifact=GetImageArtifact(image, "trim:edges"); if (artifact == (const char *) NULL) { bounds.width=image->columns == 1 ? 1 : 0; bounds.height=image->rows == 1 ? 
1 : 0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; } else { char *edges, *p, *q; bounds.width=(size_t) image->columns; bounds.height=(size_t) image->rows; bounds.x=0; bounds.y=0; edges=AcquireString(artifact); q=edges; while ((p=StringToken(",",&q)) != (char *) NULL) { if (LocaleCompare(p,"north") == 0) bounds.y=(ssize_t) image->rows; if (LocaleCompare(p,"east") == 0) bounds.width=0; if (LocaleCompare(p,"south") == 0) bounds.height=0; if (LocaleCompare(p,"west") == 0) bounds.x=(ssize_t) image->columns; } edges=DestroyString(edges); } GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t) image->rows-1,1,1,exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[3]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { 
status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; if ((x < (ssize_t) bounding_box.width) && (y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse)) { bounding_box.width=(size_t) x; bounding_box.height=(size_t) y; } p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. 
% % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; ssize_t i; /* Most dominant color of edges/corners is the background color of the image. */ artifact=GetImageArtifact(image,"convex-hull:background-color"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"background"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; 
edge_geometry.width=0; edge_geometry.height=1; break; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } edge_census=(-1.0); for (i=0; i < 4; i++) if (census[i] > edge_census) { edge_background=background[i]; edge_census=census[i]; } return(edge_background); } void TraceConvexHull(PointInfo *vertices,size_t number_vertices, PointInfo ***monotone_chain,size_t *chain_length) { PointInfo **chain; ssize_t i; size_t demark, n; /* Construct the upper and lower hulls: rightmost to leftmost counterclockwise. 
*/ chain=(*monotone_chain); n=0; for (i=0; i < (ssize_t) number_vertices; i++) { while ((n >= 2) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } demark=n+1; for (i=(ssize_t) number_vertices-2; i >= 0; i--) { while ((n >= demark) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } *chain_length=n; } MagickExport PointInfo *GetImageConvexHull(const Image *image, size_t *number_vertices,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MemoryInfo *monotone_info, *vertices_info; PixelInfo background; PointInfo *convex_hull, **monotone_chain, *vertices; size_t n; ssize_t y; /* Identify convex hull vertices of image foreground object(s). */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(*vertices)); monotone_info=AcquireVirtualMemory(2*image->columns,2* image->rows*sizeof(*monotone_chain)); if ((vertices_info == (MemoryInfo *) NULL) || (monotone_info == (MemoryInfo *) NULL)) { if (monotone_info != (MemoryInfo *) NULL) monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info); if (vertices_info != (MemoryInfo *) NULL) vertices_info=RelinquishVirtualMemory(vertices_info); return((PointInfo *) NULL); } vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info); monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info); image_view=AcquireVirtualCacheView(image,exception); background=GetEdgeBackgroundColor(image,image_view,exception); status=MagickTrue; n=0; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; 
continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) { vertices[n].x=(double) x; vertices[n].y=(double) y; n++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Return the convex hull of the image foreground object(s). */ TraceConvexHull(vertices,n,&monotone_chain,number_vertices); convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*convex_hull)); if (convex_hull != (PointInfo *) NULL) for (n=0; n < *number_vertices; n++) convex_hull[n]=(*monotone_chain[n]); monotone_info=RelinquishVirtualMemory(monotone_info); vertices_info=RelinquishVirtualMemory(vertices_info); return(convex_hull); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M i n i m u m B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMinimumBoundingBox() returns the points that form the minimum % bounding box around the image foreground objects with the "Rotating % Calipers" algorithm. The method also returns these properties: % minimum-bounding-box:area, minimum-bounding-box:width, % minimum-bounding-box:height, and minimum-bounding-box:angle. 
% % The format of the GetImageMinimumBoundingBox method is: % % PointInfo *GetImageMinimumBoundingBox(Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the bounding box. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CaliperInfo { double area, width, height, projection; ssize_t p, q, v; } CaliperInfo; static inline double getAngle(PointInfo *p,PointInfo *q) { /* Get the angle between line (p,q) and horizontal axis, in degrees. */ return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x))); } static inline double getDistance(PointInfo *p,PointInfo *q) { double distance; distance=hypot(p->x-q->x,p->y-q->y); return(distance*distance); } static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Projection of vector (x,y) - p into a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance); } static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Distance from a point (x,y) to a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance); } MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image, size_t *number_vertices,ExceptionInfo *exception) { CaliperInfo caliper_info; const char *artifact; double angle, diameter, distance; PointInfo *bounding_box, *vertices; ssize_t i; size_t number_hull_vertices; /* Generate the minimum bounding box with the "Rotating Calipers" algorithm. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices=GetImageConvexHull(image,&number_hull_vertices,exception); if (vertices == (PointInfo *) NULL) return((PointInfo *) NULL); *number_vertices=4; bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*bounding_box)); if (bounding_box == (PointInfo *) NULL) { vertices=(PointInfo *) RelinquishMagickMemory(vertices); return((PointInfo *) NULL); } caliper_info.area=2.0*image->columns*image->rows; caliper_info.width=(double) image->columns+image->rows; caliper_info.height=0.0; caliper_info.projection=0.0; caliper_info.p=(-1); caliper_info.q=(-1); caliper_info.v=(-1); for (i=0; i < (ssize_t) number_hull_vertices; i++) { double area = 0.0, max_projection = 0.0, min_diameter = -1.0, min_projection = 0.0; ssize_t j, k; ssize_t p = -1, q = -1, v = -1; for (j=0; j < (ssize_t) number_hull_vertices; j++) { double diameter; diameter=fabs(getFeretDiameter(&vertices[i], &vertices[(i+1) % number_hull_vertices],&vertices[j])); if (min_diameter < diameter) { min_diameter=diameter; p=i; q=(i+1) % number_hull_vertices; v=j; } } for (k=0; k < (ssize_t) number_hull_vertices; k++) { double projection; /* Rotating calipers. */ projection=getProjection(&vertices[p],&vertices[q],&vertices[k]); min_projection=MagickMin(min_projection,projection); max_projection=MagickMax(max_projection,projection); } area=min_diameter*(max_projection-min_projection); if (caliper_info.area > area) { caliper_info.area=area; caliper_info.width=min_diameter; caliper_info.height=max_projection-min_projection; caliper_info.projection=max_projection; caliper_info.p=p; caliper_info.q=q; caliper_info.v=v; } } /* Initialize minimum bounding box. 
*/ diameter=getFeretDiameter(&vertices[caliper_info.p], &vertices[caliper_info.q],&vertices[caliper_info.v]); angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y, vertices[caliper_info.q].x-vertices[caliper_info.p].x); bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)* caliper_info.projection; bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)* caliper_info.projection; bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+ 0.5); bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+ 0.5); bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+ 0.5); bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+ 0.5); /* Export minimum bounding box properties. */ (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g", GetMagickPrecision(),caliper_info.area); (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g", GetMagickPrecision(),caliper_info.width); (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g", GetMagickPrecision(),caliper_info.height); (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.p].x, GetMagickPrecision(),vertices[caliper_info.p].y); (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.q].x, GetMagickPrecision(),vertices[caliper_info.q].y); (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.v].x, GetMagickPrecision(),vertices[caliper_info.v].y); /* Find smallest angle to origin. 
*/ distance=hypot(bounding_box[0].x,bounding_box[0].y); angle=getAngle(&bounding_box[0],&bounding_box[1]); for (i=1; i < 4; i++) { double d = hypot(bounding_box[i].x,bounding_box[i].y); if (d < distance) { distance=d; angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]); } } artifact=GetImageArtifact(image,"minimum-bounding-box:orientation"); if (artifact != (const char *) NULL) { double length, q_length, p_length; PointInfo delta, point; /* Find smallest perpendicular distance from edge to origin. */ point=bounding_box[0]; for (i=1; i < 4; i++) { if (bounding_box[i].x < point.x) point.x=bounding_box[i].x; if (bounding_box[i].y < point.y) point.y=bounding_box[i].y; } for (i=0; i < 4; i++) { bounding_box[i].x-=point.x; bounding_box[i].y-=point.y; } for (i=0; i < 4; i++) { double d, intercept, slope; delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x; delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y; slope=delta.y*PerceptibleReciprocal(delta.x); intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x; d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)* PerceptibleReciprocal(sqrt(slope*slope+1.0))); if ((i == 0) || (d < distance)) { distance=d; point=delta; } } angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x))); length=hypot(point.x,point.y); p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)- length); q_length=fabs(length-(double) MagickMin(caliper_info.width, caliper_info.height)); if (LocaleCompare(artifact,"landscape") == 0) { if (p_length > q_length) angle+=(angle < 0.0) ? 90.0 : -90.0; } else if (LocaleCompare(artifact,"portrait") == 0) { if (p_length < q_length) angle+=(angle >= 0.0) ? 
          90.0 : -90.0;
      }
  }
  /*
    Export the bounding-box angle and its inverse (for -unrotate) as image
    properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded up to a
%  legal quantum depth: 8, 16, 32, or 64.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  /*
    Round the stored depth up to the next legal quantum depth; depths above
    64 are left untouched here and only limited by the constrain clamp below.
  */
  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  MagickBooleanType
    has_alpha;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Classify by the image's declared state only; no pixel inspection here.
  */
  has_alpha=image->alpha_trait != UndefinedPixelTrait ? MagickTrue :
    MagickFalse;
  if (image->colorspace == CMYKColorspace)
    return(has_alpha == MagickFalse ? ColorSeparationType :
      ColorSeparationAlphaType);
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    return(has_alpha != MagickFalse ? GrayscaleAlphaType : GrayscaleType);
  if (IsPaletteImage(image) != MagickFalse)
    return(has_alpha != MagickFalse ? PaletteAlphaType : PaletteType);
  return(has_alpha != MagickFalse ? TrueColorAlphaType : TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity is
%  either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a previously established gray/bi-level type; skip the pixel scan.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /*
    Start with the strictest classification and relax as pixels disprove it:
    BilevelType -> GrayscaleType -> UndefinedType (not gray at all).
  */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          /*
            A non-gray pixel settles the question; stop scanning.
          */
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  ssize_t
    x;

  const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Trust a previously established bi-level type; skip the pixel scan.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  /*
    Assume bi-level until a pixel that is neither black nor white is found.
  */
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%    (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  IdentifyImageType(): classify by cheapest-first checks — colorspace,
  then a gray scan, then a palette scan, finally defaulting to true color.
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* CMYK images are always a color-separation type. */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  type=IdentifyImageGray(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    return(type);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageGray() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
/* Checks only the cached image->type tag; it does not inspect pixels. */
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e M o n o c h r o m e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
%  The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
/* Checks only the cached image->type tag; it does not inspect pixels. */
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  Will return true immediatally is alpha channel is not available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No alpha channel means the image is trivially opaque. */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    /* Inner loop exited early => a translucent pixel was found. */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* y reached image->rows only if every row scanned fully opaque. */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageDepth(): quantize every updatable channel to the given bit depth by
  round-tripping through ScaleQuantumToAny/ScaleAnyToQuantum.  Three paths:
  (1) depth >= native quantum depth: just record it; (2) non-HDRI builds with
  a small quantum range: precompute a lookup table; (3) generic per-pixel
  scaling.  PseudoClass colormaps are rescaled first in all cases.
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Requested depth at or above native precision: nothing to rescale. */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        /* A prior row failed: skip remaining iterations cheaply. */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel        Grayscale       GrayscaleMatte
%        Palette        PaletteMatte    TrueColor
%        TrueColorMatte ColorSeparation ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageType(): coerce the image into the requested type by converting the
  colorspace, adding/removing the alpha channel, and quantizing the palette
  as each target type requires.  image->type is updated only on success.
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* Honor a per-image "dither" artifact when quantizing below. */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* Only quantize when the image does not already fit in 256 colors. */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* Threshold only the alpha channel, then restore the channel mask. */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  image->type=type;
  return(MagickTrue);
}
fill_r_3c.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 *
 * Fill routines for complex-valued 3-center integrals (i,j|k).  Each
 * GTOr3c_fill_* variant writes one (ish,jsh) shell-pair slab of the output
 * tensor for all k shells; GTOr3c_drv parallelizes over shell pairs.
 */

#include <stdlib.h>
#include <stdio.h>
#include "config.h"
#include "cint.h"
/* NOTE(review): `double complex` is used but <complex.h> is not included
 * here — presumably it comes in via cint.h; confirm against that header. */

int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/*
 * out[naoi,naoj,naok,comp] in F-order
 */
void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf,
                    int comp, int ish, int jsh,
                    int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        const int dims[] = {naoi, naoj, naok};

        /* ish/jsh arrive as 0-based offsets within the slice; convert to
         * absolute shell indices. */
        ish += ish0;
        jsh += jsh0;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* Position out at the (ip,jp) corner of this shell pair's slab. */
        out += jp * naoi + ip;

        int ksh, dk, k0;
        int shls[3];
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                /* The integral engine writes directly into out with the
                 * full-tensor leading dimensions supplied via dims. */
                (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env,
                         cintopt, buf);
        }
}

/* Scatter a dense (di,dj,dk,comp) buffer into the s2ij-packed output for
 * the strictly lower-triangular case ish > jsh (full dj columns copied). */
static void zcopy_s2_igtj(double complex *out, double complex *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double complex *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin  = in  + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                /* Row i of the packed triangle starts
                                 * (ip+1+i) entries after row i-1. */
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in  += dij * dk;
        }
}

/* Same as zcopy_s2_igtj but for the diagonal blocks ish == jsh: only the
 * j <= i lower triangle of each block is copied. */
static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double complex *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin  = in  + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in  += dij * dk;
        }
}

/*
 * out[comp,naok,nij] in C-order
 * nij = i1*(i1+1)/2 - i0*(i0+1)/2
 * [  \    ]
 * [****   ]
 * [*****  ]
 * [*****. ]  <= . may not be filled, if jsh-upper-bound < ish-upper-bound
 * [      \]
 */
void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* Upper-triangle pairs are redundant under s2ij symmetry. */
        if (ip < jp) {
                return;
        }

        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int i0 = ao_loc[ish0];
        const int i1 = ao_loc[ish1];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off = i0 * (i0 + 1) / 2;
        const size_t nij = i1 * (i1 + 1) / 2 - off;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        out += ip * (ip + 1) / 2 - off + jp;

        int ksh, dk, k0;
        int shls[3];
        /* buf holds the largest possible integral block; the integrator's
         * scratch cache lives immediately after it. */
        dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        double *cache = (double *)(buf + di * dj * dk * comp);
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                         cintopt, cache);
                if (ip != jp) {
                        zcopy_s2_igtj(out+k0*nij, buf, comp,
                                      ip, nij, nijk, di, dj, dk);
                } else {
                        zcopy_s2_ieqj(out+k0*nij, buf, comp,
                                      ip, nij, nijk, di, dj, dk);
                }
        }
}

/* Placeholder: jk-symmetric packing is not supported; aborts if called. */
void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
        fprintf(stderr, "GTOr3c_fill_s2jk not implemented\n");
        exit(1);
}

/* Driver: iterate all (ish,jsh) pairs in parallel, each thread owning a
 * private integral/scratch buffer. */
void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int comp,
                int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);

        /* const-qualified locals (nish, njsh, di, cache_size) are
         * predetermined shared under OpenMP, hence absent from shared(). */
#pragma omp parallel default(none) \
        shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env)
{
        int ish, jsh, ij;
        /* cache_size counts doubles; halved here because the buffer is
         * allocated in double complex units (2 doubles each). */
        double complex *buf = malloc(sizeof(double complex)
                                     * (di*di*di*comp + cache_size/2));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, buf, comp, ish, jsh,
                        shls_slice, ao_loc, cintopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
matmult_openmp.c
/* Matrix multiplication example OpenMP version Jim Teresco, CS 338, Williams College, CS 341, Mount Holyoke College Mon Feb 10 10:55:31 EST 2003 Updated for CSIS-335, Siena College, Fall 2021 */ /* header files needed for printf, gettimeofday, struct timeval */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> /* header file for our own timer.c function diffgettime */ #include "timer.h" /* we will multiply square matrices, how big? */ #define SIZE 1500 /* our matrices */ double a[SIZE][SIZE], b[SIZE][SIZE], c[SIZE][SIZE]; /* it's a simple program for now, we'll just put everything in main */ int main(int argc, char *argv[]) { /* counters */ int i, j, k; double sum; /* to pass to gettimeofday to get wall clock times */ struct timeval start, stop; /* initialize and allocate matrices, just fill with junk */ gettimeofday(&start, NULL); for (i=0; i<SIZE; i++) { for (j=0; j<SIZE; j++) { a[i][j] = i+j; b[i][j] = i-j; } } gettimeofday(&stop, NULL); printf("Initialization took: %f seconds\n", diffgettime(start,stop)); gettimeofday(&start, NULL); /* matrix-matrix multiply */ #pragma omp parallel for for (i=0; i<SIZE; i++) { /* for each row */ for (j=0; j<SIZE; j++) { /* for each column */ /* initialize result to 0 */ c[i][j] = 0; /* perform dot product */ for(k=0; k<SIZE; k++) { c[i][j] = c[i][j] + a[i][k]*b[k][j]; } } } gettimeofday(&stop, NULL); printf("Multiplication took: %f seconds\n", diffgettime(start,stop)); /* This is here to make sure the optimizing compiler doesn't get any big ideas about "optimizing" code away completely */ sum=0; for (i=0; i<SIZE; i++) { for (j=0; j<SIZE; j++) { sum += c[i][j]; } } printf("Sum of elements of c=%f\n", sum); return 0; }
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM37 Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM37_CLANG_AST_STMT_H #define LLVM37_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm37/ADT/ArrayRef.h" #include "llvm37/ADT/PointerIntPair.h" #include "llvm37/Support/Compiler.h" #include "llvm37/Support/ErrorHandling.h" #include <string> namespace llvm37 { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). 
//===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator : public std::iterator<std::forward_iterator_tag, Expr *&, ptrdiff_t, Expr *&, Expr *&> { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(nullptr) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator : public std::iterator<std::forward_iterator_tag, const Expr *&, ptrdiff_t, const Expr *&, const Expr *&> { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(nullptr) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. 
// // /////////////////////////////////////////////////////////////////////////////// /// Stmt - This represents one statement. /// class LLVM37_ALIGNAS(LLVM37_PTR_SIZE) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: void* operator new(size_t bytes) throw() { llvm37_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void* data) throw() { llvm37_unreachable("Stmts cannot be released with regular 'delete'."); } class StmtBitfields { friend class Stmt; /// \brief The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class CompoundStmtBitfields { friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; }; class ExprBitfields { friend class Expr; friend class DeclRefExpr; // computeDependence friend class InitListExpr; // ctor friend class DesignatedInitExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class ASTStmtReader; // deserialization friend class CXXNewExpr; // ctor friend class DependentScopeDeclRefExpr; // ctor friend class CXXConstructExpr; // ctor friend class CallExpr; // ctor friend class OffsetOfExpr; // ctor friend class ObjCMessageExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ShuffleVectorExpr; // ctor friend class ParenListExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class OverloadExpr; // ctor friend class PseudoObjectExpr; // ctor friend class AtomicExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 
2; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = 16 }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 2; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; // HLSL Change unsigned IsType : 1; // true if operand is a type, false if an expression. }; class DeclRefExprBitfields { friend class DeclRefExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; }; class CastExprBitfields { friend class CastExpr; unsigned : NumExprBits; unsigned Kind : 7; // HLSL Change unsigned BasePathSize : 32 - 7 - NumExprBits; // HLSL Change }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; }; class ExprWithCleanupsBitfields { friend class ExprWithCleanups; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; unsigned NumObjects : 32 - NumExprBits; }; class PseudoObjectExprBitfields { friend class PseudoObjectExpr; friend class ASTStmtReader; // deserialization unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. 
unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class TypeTraitExprBitfields { friend class TypeTraitExpr; friend class ASTStmtReader; friend class ASTStmtWriter; unsigned : NumExprBits; /// \brief The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// \brief If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// \brief The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; union { StmtBitfields StmtBits; CompoundStmtBitfields CompoundStmtBits; ExprBitfields ExprBits; CharacterLiteralBitfields CharacterLiteralBits; FloatingLiteralBitfields FloatingLiteralBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; DeclRefExprBitfields DeclRefExprBits; CastExprBitfields CastExprBits; CallExprBitfields CallExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; PseudoObjectExprBitfields PseudoObjectExprBits; ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; InitListExprBitfields InitListExprBits; TypeTraitExprBitfields TypeTraitExprBits; }; friend class ASTStmtReader; friend class ASTStmtWriter; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. 
void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void* operator new(size_t bytes, void* mem) throw() { return mem; } void operator delete(void*, const ASTContext&, unsigned) throw() { } void operator delete(void*, const ASTContext*, unsigned) throw() { } void operator delete(void*, size_t) throw() { } void operator delete(void*, void*) throw() { } public: /// \brief A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell { }; private: /// \brief Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// \brief Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) % llvm37::AlignOf<void *>::Alignment == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM37_READONLY; SourceLocation getLocStart() const LLVM37_READONLY; SourceLocation getLocEnd() const LLVM37_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// \brief Dumps the specified AST fragment and all subtrees to /// \c llvm37::errs(). 
void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// dumpColor - same as dump(), but forces color highlighting. void dumpColor() const; /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. void dumpPretty(const ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only /// works on systems with GraphViz (Mac OS X) or dot+gv installed. void viewAST() const; /// Skip past any implicit AST nodes which might surround this /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes. Stmt *IgnoreImplicit(); /// \brief Skip no-op (attributed, compound) container stmts and skip captured /// stmt at the top, if \a IgnoreCaptured is true. Stmt *IgnoreContainers(bool IgnoreCaptured = false); const Stmt *stripLabelLikeStatements() const; Stmt *stripLabelLikeStatements() { return const_cast<Stmt*>( const_cast<const Stmt*>(this)->stripLabelLikeStatements()); } /// Child Iterators: All subclasses must implement 'children' /// to permit easy iteration over the substatements/subexpessions of an /// AST node. This permits easy iteration over all nodes in the AST. typedef StmtIterator child_iterator; typedef ConstStmtIterator const_child_iterator; typedef StmtRange child_range; typedef ConstStmtRange const_child_range; child_range children(); const_child_range children() const { return const_cast<Stmt*>(this)->children(); } child_iterator child_begin() { return children().first; } child_iterator child_end() { return children().second; } const_child_iterator child_begin() const { return children().first; } const_child_iterator child_end() const { return children().second; } /// \brief Produce a unique representation of the given statement. 
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm37::FoldingSetNodeID &ID, const ASTContext &Context,
             bool Canonical) const;
};  // class Stmt

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
  DeclGroupRef DG;                  // The (possibly single) declaration group.
  SourceLocation StartLoc, EndLoc;  // Source range of the whole statement.
public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
    : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM37_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  // Iterators over the declarations contained in the group.
  typedef DeclGroupRef::iterator decl_iterator;
  typedef DeclGroupRef::const_iterator const_decl_iterator;
  typedef llvm37::iterator_range<decl_iterator> decl_range;
  typedef llvm37::iterator_range<const_decl_iterator> decl_const_range;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
/// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM37_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; // HLSL Change: Adding discard statement support /// discard - This is the hlsl discard statement "discard;". /// class DiscardStmt : public Stmt { SourceLocation Loc; public: DiscardStmt(SourceLocation L) : Stmt(DiscardStmtClass) , Loc(L) {} /// \brief Build an empty Discard statement. explicit DiscardStmt(EmptyShell Empty) : Stmt(DiscardStmtClass, Empty) {} SourceLocation getLoc() const { return Loc; } void setLoc(SourceLocation L) { Loc = L; } SourceLocation getLocStart() const LLVM37_READONLY { return Loc; } SourceLocation getLocEnd() const LLVM37_READONLY { return Loc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DiscardStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; // End of HLSL Change /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
///
class CompoundStmt : public Stmt {
  // Array of contained statements; the count lives in
  // CompoundStmtBits.NumStmts (see size()).
  Stmt** Body;
  SourceLocation LBraceLoc, RBraceLoc;

  friend class ASTStmtReader;

public:
  CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  /// \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
    : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  /// \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
    : Stmt(CompoundStmtClass, Empty), Body(nullptr) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // Mutable iteration over the statement list.
  typedef Stmt** body_iterator;
  typedef llvm37::iterator_range<body_iterator> body_range;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }
  Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  // Const iteration over the statement list.
  typedef Stmt* const * const_body_iterator;
  typedef llvm37::iterator_range<const_body_iterator> body_const_range;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_front() const { return !body_empty() ? Body[0] : nullptr; }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM37_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
SwitchCase *NextSwitchCase;
SourceLocation KeywordLoc;  // Location of 'case' or 'default'.
SourceLocation ColonLoc;

SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
  : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {
}

SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {}

public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

SourceLocation getKeywordLoc() const { return KeywordLoc; }
void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }

Stmt *getSubStmt();
const Stmt *getSubStmt() const {
  return const_cast<SwitchCase*>(this)->getSubStmt();
}

SourceLocation getLocStart() const LLVM37_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM37_READONLY;

static bool classof(const Stmt *T) {
  return T->getStmtClass() == CaseStmtClass ||
         T->getStmtClass() == DefaultStmtClass;
}
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension
public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
    : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }

  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }
  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }
  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM37_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;
    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};

class DefaultStmt : public SwitchCase {
  Stmt* SubStmt;
public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
    : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
  : SwitchCase(DefaultStmtClass, Empty) { }

Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }

SourceLocation getDefaultLoc() const { return KeywordLoc; }
void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }

SourceLocation getLocStart() const LLVM37_READONLY { return KeywordLoc; }
SourceLocation getLocEnd() const LLVM37_READONLY { return SubStmt->getLocEnd();}

static bool classof(const Stmt *T) {
  return T->getStmtClass() == DefaultStmtClass;
}

// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};

// Defined out of line so CaseStmt and DefaultStmt are complete at this point.
inline SourceLocation SwitchCase::getLocEnd() const {
  if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
    return CS->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
///
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  /// \brief Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM37_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY { return SubStmt->getLocEnd();}

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
  Stmt *SubStmt;
  SourceLocation AttrLoc;
  unsigned NumAttrs;  // Number of Attr* stored in trailing storage.

  friend class ASTStmtReader;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs,
                 Stmt *SubStmt)
    : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
      NumAttrs(Attrs.size()) {
    memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *));
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
    : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *));
  }

  // The attribute array is allocated in memory immediately after the object.
  Attr *const *getAttrArrayPtr() const {
    return reinterpret_cast<Attr *const *>(this + 1);
  }
  Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);
  /// \brief Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

SourceLocation getAttrLoc() const { return AttrLoc; }
ArrayRef<const Attr*> getAttrs() const {
  return llvm37::makeArrayRef(getAttrArrayPtr(), NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }

SourceLocation getLocStart() const LLVM37_READONLY { return AttrLoc; }
SourceLocation getLocEnd() const LLVM37_READONLY { return SubStmt->getLocEnd();}

child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

static bool classof(const Stmt *T) {
  return T->getStmtClass() == AttributedStmtClass;
}
};

/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
  enum { VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// \brief Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  SourceLocation getLocStart() const LLVM37_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY {
    // The statement ends with the 'else' arm when present, 'then' otherwise.
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm37::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond);

  /// \brief Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }

/// \brief Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);

/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
  return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}

const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Stmt *getBody() const { return SubExprs[BODY]; }
const SwitchCase *getSwitchCaseList() const { return FirstCase.getPointer(); }

Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

/// \brief Set the case list for this switch statement.
void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

SourceLocation getSwitchLoc() const { return SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

void setBody(Stmt *S, SourceLocation SL) {
  SubExprs[BODY] = S;
  SwitchLoc = SL;
}

// Prepends SC to the intrusive linked list of cases (see NextSwitchCase).
void addSwitchCase(SwitchCase *SC) {
  assert(!SC->getNextSwitchCase() &&
         "case/default already added to a switch");
  SC->setNextSwitchCase(FirstCase.getPointer());
  FirstCase.setPointer(SC);
}

/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { FirstCase.setInt(true); }

/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

SourceLocation getLocStart() const LLVM37_READONLY { return SwitchLoc; }
SourceLocation getLocEnd() const LLVM37_READONLY {
  // The body may be null (e.g. an incomplete AST); fall back to the condition.
  return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd()
                        : SubExprs[COND]->getLocEnd();
}

// Iterators
child_range children() {
  return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}

static bool classof(const Stmt *T) {
  return T->getStmtClass() == SwitchStmtClass;
}
};

/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
  return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
}

Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *S) { SubExprs[BODY] = S; }

SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }

SourceLocation getLocStart() const LLVM37_READONLY { return WhileLoc; }
SourceLocation getLocEnd() const LLVM37_READONLY {
  return SubExprs[BODY]->getLocEnd();
}

static bool classof(const Stmt *T) {
  return T->getStmtClass() == WhileStmtClass;
}

// Iterators
child_range children() {
  return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};

/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
    : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM37_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForLoc; } void setForLoc(SourceLocation L) { ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getLocStart() const LLVM37_READONLY { return ForLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return SubExprs[BODY]->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. 
/// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM37_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. 
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
  return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
}

SourceLocation getLocStart() const LLVM37_READONLY { return GotoLoc; }
SourceLocation getLocEnd() const LLVM37_READONLY { return Target->getLocEnd(); }

static bool classof(const Stmt *T) {
  return T->getStmtClass() == IndirectGotoStmtClass;
}

// Iterators
child_range children() { return child_range(&Target, &Target+1); }
};

/// ContinueStmt - This represents a continue.
///
class ContinueStmt : public Stmt {
  SourceLocation ContinueLoc;
public:
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}

  /// \brief Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }

  SourceLocation getContinueLoc() const { return ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueLoc = L; }

  SourceLocation getLocStart() const LLVM37_READONLY { return ContinueLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY { return ContinueLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }

  // A continue statement is a leaf; it has no children.
  child_range children() { return child_range(); }
};

/// BreakStmt - This represents a break.
///
class BreakStmt : public Stmt {
  SourceLocation BreakLoc;
public:
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {
    static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation),
                  "BreakStmt too large");
  }

  /// \brief Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }

  SourceLocation getBreakLoc() const { return BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakLoc = L; }

  SourceLocation getLocStart() const LLVM37_READONLY { return BreakLoc; }
  SourceLocation getLocEnd() const LLVM37_READONLY { return BreakLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }

  // A break statement is a leaf; it has no children.
  child_range children() { return child_range(); }
};

/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
///
class ReturnStmt : public Stmt {
  SourceLocation RetLoc;
  Stmt *RetExpr;  // Null when 'return;' has no operand.
  const VarDecl *NRVOCandidate;

public:
  explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {}

  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
      : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E),
        NRVOCandidate(NRVOCandidate) {}

  /// \brief Build an empty return expression.
  explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }

  const Expr *getRetValue() const;
  Expr *getRetValue();
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }

  SourceLocation getReturnLoc() const { return RetLoc; }
  void setReturnLoc(SourceLocation L) { RetLoc = L; }

  /// \brief Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM37_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM37_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM37_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm37::iterator_range<inputs_iterator> inputs_range; typedef llvm37::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm37::iterator_range<outputs_iterator> outputs_range; typedef llvm37::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM37 IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM37_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm37::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm37::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm37::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM37_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation 
getLocStart() const LLVM37_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM37_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM37_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM37_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM37_READONLY { return getTryLoc(); } SourceLocation getLocEnd() 
const LLVM37_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM37_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM37_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. 
class Capture { llvm37::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr) : VarAndKind(Var, Kind), Loc(Loc) { switch (Kind) { case VCK_This: assert(!Var && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; case VCK_VLAType: assert(!Var && "Variable-length array type capture cannot have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert(capturesVariable() && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variable captured, including 'this'. 
unsigned NumCaptures; /// \brief The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm37::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return const_cast<CapturedStmt *>(this)->getCapturedDecl(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. 
void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm37::iterator_range<capture_iterator> capture_range; typedef llvm37::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm37::iterator_range<capture_init_iterator> capture_init_range; capture_init_range capture_inits() const { return capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM37_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM37_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM37_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
zlantr.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" /***************************************************************************//** * * @ingroup plasma_lantr * * Returns the norm of a trapezoidal or triangular matrix as * * zlantr = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm * ( * ( norm1(A), NORM = PlasmaOneNorm * ( * ( normI(A), NORM = PlasmaInfNorm * ( * ( normF(A), NORM = PlasmaFrobeniusNorm * * where norm1 denotes the one norm of a matrix (maximum column sum), * normI denotes the infinity norm of a matrix (maximum row sum) and * normF denotes the Frobenius norm of a matrix (square root of sum * of squares). Note that max(abs(A(i,j))) is not a consistent matrix * norm. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: max norm * - PlasmaOneNorm: one norm * - PlasmaInfNorm: infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] diag * - PlasmaNonUnit: A has non-unit diagonal, * - PlasmaUnit: A has unit diagonal. * * @param[in] m * The number of rows of the matrix A. m >= 0. When m = 0, * the returned value is set to zero. * * @param[in] n * The number of columns of the matrix A. n >= 0. When n = 0, * the returned value is set to zero. * * @param[in] pA * The m-by-n trapezoidal matrix A. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * ******************************************************************************* * * @retval double * The specified norm of the trapezoidal or triangular matrix A. 
* ******************************************************************************* * * @sa plasma_omp_zlantr * @sa plasma_clantr * @sa plasma_dlantr * @sa plasma_slantr * ******************************************************************************/ double plasma_zlantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag, int m, int n, plasma_complex64_t *pA, int lda) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) && (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) { plasma_error("illegal value of norm"); return -1; } if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -2; } if ((diag != PlasmaUnit) && (diag != PlasmaNonUnit)) { plasma_error("illegal value of diag"); return -3; } if (m < 0) { plasma_error("illegal value of m"); return -4; } if (n < 0) { plasma_error("illegal value of n"); return -5; } if (lda < imax(1, m)) { printf("%d\n", lda); plasma_error("illegal value of lda"); return -7; } // quick return if (imin(n, m) == 0) return 0.0; // Tune parameters. if (plasma->tuning) plasma_tune_lantr(plasma, PlasmaComplexDouble, m, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Allocate workspace. 
double *work = NULL; switch (norm) { case PlasmaMaxNorm: work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double)); break; case PlasmaOneNorm: work = (double*)calloc(((size_t)A.mt*A.n+A.n), sizeof(double)); break; case PlasmaInfNorm: work = (double*)calloc(((size_t)A.nt*A.m+A.m), sizeof(double)); break; case PlasmaFrobeniusNorm: work = (double*)calloc((size_t)2*A.mt*A.nt, sizeof(double)); break; } if (work == NULL) { plasma_error("malloc() failed"); return PlasmaErrorOutOfMemory; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); double value; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, &sequence, &request); // Call tile async function. plasma_omp_zlantr(norm, uplo, diag, A, work, &value, &sequence, &request); } // implicit synchronization free(work); // Free matrix in tile layout. plasma_desc_destroy(&A); // Return the norm. return value; } /***************************************************************************//** * * @ingroup plasma_lantr * * Calculates the max, one, infinity or Frobenius norm of a general matrix. * Non-blocking equivalent of plasma_zlantr(). May return before the * computation is finished. Operates on matrices stored by tiles. All matrices * are passed through descriptors. All dimensions are taken from the * descriptors. Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] diag * - PlasmaNonUnit: A has non-unit diagonal, * - PlasmaUnit: A has unit diagonal. 
* * @param[in] A * The descriptor of matrix A. * * @param[out] work * Workspace of size: * - PlasmaMaxNorm: A.mt*A.nt * - PlasmaOneNorm: A.mt*A.n + A.n * - PlasmaInfNorm: A.mt*A.n + A.n * - PlasmaFrobeniusNorm: 2*A.mt*A.nt * * @param[out] value * The calculated value of the norm requested. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zlantr * @sa plasma_omp_clantr * @sa plasma_omp_dlantr * @sa plasma_omp_slantr * ******************************************************************************/ void plasma_omp_zlantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag, plasma_desc_t A, double *work, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) && (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) { plasma_error("illegal value of norm"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((diag != PlasmaUnit) && (diag != PlasmaNonUnit)) { plasma_error("illegal value of diag"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid descriptor A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (imin(A.m, A.n) == 0) { *value = 0.0; return; } // Call the parallel function. plasma_pzlantr(norm, uplo, diag, A, work, value, sequence, request); }
kgraph-data.h
// kgraph-data.h -- data-holding and distance-metric utilities for KGraph.
// Provides aligned row-major matrix storage (Matrix), zero-copy adapters for
// third-party matrix types (MatrixProxy), distance oracles used by the index
// builder (MatrixOracle), and a recall-evaluation helper (AverageRecall).
#ifndef WDONG_KGRAPH_DATA
#define WDONG_KGRAPH_DATA

#include <cmath>
#include <cstring>
#include <cstdlib>
#include <vector>
#include <fstream>
#include <stdexcept>
#include <boost/assert.hpp>

// Row alignment (bytes) chosen to satisfy the strictest SIMD load available
// at compile time: 32 for AVX, 16 for SSE2, 4 otherwise.  Non-GCC compilers
// get the conservative maximum of 32.
#ifdef __GNUC__
#ifdef __AVX__
#define KGRAPH_MATRIX_ALIGN 32
#else
#ifdef __SSE2__
#define KGRAPH_MATRIX_ALIGN 16
#else
#define KGRAPH_MATRIX_ALIGN 4
#endif
#endif
#else
#define KGRAPH_MATRIX_ALIGN 32
#endif

namespace kgraph {

    /// L2 square distance with AVX instructions.
    /** AVX instructions have strong alignment requirement for t1 and t2.
     */
    extern float float_l2sqr_avx (float const *t1, float const *t2, unsigned dim);
    /// L2 square distance with SSE2 instructions.
    extern float float_l2sqr_sse2 (float const *t1, float const *t2, unsigned dim);
    /// Squared L2 norm of a single vector, SSE2 (overload of the above).
    extern float float_l2sqr_sse2 (float const *, unsigned dim);
    /// Inner product with SSE2 instructions.
    extern float float_dot_sse2 (float const *, float const *, unsigned dim);
    /// L2 square distance for uint8_t with SSE2 instructions (for SIFT).
    extern float uint8_l2sqr_sse2 (uint8_t const *t1, uint8_t const *t2, unsigned dim);

    // Portable (non-SIMD) counterparts, defined elsewhere in the library.
    extern float float_l2sqr (float const *, float const *, unsigned dim);
    extern float float_l2sqr (float const *, unsigned dim);
    extern float float_dot (float const *, float const *, unsigned dim);

    using std::vector;

    /// namespace for various distance metrics.
    namespace metric {
        /// L2 square distance.
        struct l2sqr {
            template <typename T>
            /// L2 square distance (no square root taken).
            static float apply (T const *t1, T const *t2, unsigned dim) {
                float r = 0;
                for (unsigned i = 0; i < dim; ++i) {
                    float v = float(t1[i]) - float(t2[i]);
                    v *= v;
                    r += v;
                }
                return r;
            }
            /// inner product.
            template <typename T>
            static float dot (T const *t1, T const *t2, unsigned dim) {
                float r = 0;
                for (unsigned i = 0; i < dim; ++i) {
                    r += float(t1[i]) * float(t2[i]);
                }
                return r;
            }
            /// L2 norm.  NOTE(review): despite the name this returns the
            /// SQUARED norm (sum of squares, no sqrt) -- callers such as
            /// Matrix::normalize2 take sqrt themselves.
            template <typename T>
            static float norm2 (T const *t1, unsigned dim) {
                float r = 0;
                for (unsigned i = 0; i < dim; ++i) {
                    float v = float(t1[i]);
                    v *= v;
                    r += v;
                }
                return r;
            }
        };
        /// True (rooted) L2 distance, built on l2sqr.
        struct l2 {
            template <typename T>
            static float apply (T const *t1, T const *t2, unsigned dim) {
                return sqrt(l2sqr::apply<T>(t1, t2, dim));
            }
        };
    }

    /// Matrix data.
    /// Owns a row-major buffer where every row is padded to a multiple of A
    /// bytes so that SIMD kernels can load rows with aligned instructions.
    template <typename T, unsigned A = KGRAPH_MATRIX_ALIGN>
    class Matrix {
        unsigned col;       // elements per row
        unsigned row;       // number of rows
        size_t stride;      // bytes per row, rounded up to a multiple of A
        char *data;         // owned, A-aligned buffer of row * stride bytes

        // (Re)allocate for r rows of c elements; frees any previous buffer.
        // NOTE(review): memalign is a non-standard allocator usually declared
        // in <malloc.h>, which is not included here -- presumably pulled in
        // transitively; verify on non-glibc platforms.  runtime_error is also
        // used unqualified (no `using std::runtime_error` in this header).
        void reset (unsigned r, unsigned c) {
            row = r;
            col = c;
            stride = (sizeof(T) * c + A - 1) / A * A;
            /*
            data.resize(row * stride);
            */
            if (data) free(data);
            data = (char *)memalign(A, row * stride); // SSE instruction needs data to be aligned
            if (!data) throw runtime_error("memalign");
        }
    public:
        Matrix (): col(0), row(0), stride(0), data(0) {}
        Matrix (unsigned r, unsigned c): data(0) {
            reset(r, c);
        }
        ~Matrix () {
            if (data) free(data);
        }
        /// Number of rows (i.e. number of data points).
        unsigned size () const {
            return row;
        }
        /// Number of elements per row (the dimension).
        unsigned dim () const {
            return col;
        }
        /// Row stride in bytes (>= dim() * sizeof(T)).
        size_t step () const {
            return stride;
        }
        /// Reallocate; existing contents are NOT preserved.
        void resize (unsigned r, unsigned c) {
            reset(r, c);
        }
        T const *operator [] (unsigned i) const {
            return reinterpret_cast<T const *>(&data[stride * i]);
        }
        T *operator [] (unsigned i) {
            return reinterpret_cast<T *>(&data[stride * i]);
        }
        /// Zero the whole buffer, padding included.
        void zero () {
            memset(data, 0, row * stride);
        }
        /// Scale every row to unit L2 norm, in parallel.
        /// NOTE(review): an all-zero row yields sum == 0 and a division by
        /// zero below -- confirm callers never normalize zero vectors.
        void normalize2 () {
#pragma omp parallel for
            for (unsigned i = 0; i < row; ++i) {
                T *p = operator[](i);
                double sum = metric::l2sqr::norm2(p, col);
                sum = std::sqrt(sum);
                for (unsigned j = 0; j < col; ++j) {
                    p[j] /= sum;
                }
            }
        }
        /// Load raw records from a binary file: skip `skip` leading bytes,
        /// then read rows of sizeof(T)*dim bytes separated by `gap` bytes.
        /// Row count is inferred from the file size.
        /// NOTE(review): io_error is not declared in this header -- it
        /// presumably comes from kgraph.h, which users must include first.
        void load (const std::string &path, unsigned dim, unsigned skip = 0, unsigned gap = 0) {
            std::ifstream is(path.c_str(), std::ios::binary);
            if (!is) throw io_error(path);
            is.seekg(0, std::ios::end);
            size_t size = is.tellg();
            size -= skip;
            unsigned line = sizeof(T) * dim + gap;
            unsigned N = size / line;
            reset(N, dim);
            zero();
            is.seekg(skip, std::ios::beg);
            for (unsigned i = 0; i < N; ++i) {
                is.read(&data[stride * i], sizeof(T) * dim);
                is.seekg(gap, std::ios::cur);
            }
            if (!is) throw io_error(path);
        }

        /// Load a matrix in LSHKIT format: a 3-unsigned header
        /// (element size, rows, cols) followed by packed row data.
        void load_lshkit (std::string const &path) {
            static const unsigned LSHKIT_HEADER = 3;
            std::ifstream is(path.c_str(), std::ios::binary);
            unsigned header[LSHKIT_HEADER]; /* entry size, row, col */
            is.read((char *)header, sizeof header);
            if (!is) throw io_error(path);
            if (header[0] != sizeof(T)) throw io_error(path);
            is.close();
            unsigned D = header[2];
            unsigned skip = LSHKIT_HEADER * sizeof(unsigned);
            unsigned gap = 0;
            load(path, D, skip, gap);
        }

        /// Save in LSHKIT format (see load_lshkit).  Alignment padding is
        /// not written: only sizeof(T)*col bytes per row go to disk.
        void save_lshkit (std::string const &path) {
            std::ofstream os(path.c_str(), std::ios::binary);
            unsigned header[3];
            assert(sizeof header == 3*4);
            header[0] = sizeof(T);
            header[1] = row;
            header[2] = col;
            os.write((const char *)header, sizeof(header));
            for (unsigned i = 0; i < row; ++i) {
                os.write(&data[stride * i], sizeof(T) * col);
            }
        }
    };

    /// Matrix proxy to interface with 3rd party libraries (FLANN, OpenCV, NumPy).
    /// Non-owning view: the adapted matrix must outlive the proxy.
    template <typename DATA_TYPE, unsigned A = KGRAPH_MATRIX_ALIGN>
    class MatrixProxy {
        unsigned rows;
        unsigned cols;      // # elements, not bytes, in a row,
        size_t stride;      // # bytes in a row, >= cols * sizeof(element)
        uint8_t const *data;
    public:
        MatrixProxy (Matrix<DATA_TYPE> const &m)
            : rows(m.size()), cols(m.dim()), stride(m.step()), data(reinterpret_cast<uint8_t const *>(m[0])) {
        }
// Third-party adapters are only compiled when the corresponding library
// header has already been included (detected via its include guard), and
// only in non-AVX builds where the alignment requirement is weaker.
#ifndef __AVX__
#ifdef FLANN_DATASET_H_
        /// Construct from FLANN matrix.
        MatrixProxy (flann::Matrix<DATA_TYPE> const &m)
            : rows(m.rows), cols(m.cols), stride(m.stride), data(m.data) {
            if (stride % A) throw invalid_argument("bad alignment");
        }
#endif
#ifdef __OPENCV_CORE_HPP__
        /// Construct from OpenCV matrix.
        MatrixProxy (cv::Mat const &m)
            : rows(m.rows), cols(m.cols), stride(m.step), data(m.data) {
            if (stride % A) throw invalid_argument("bad alignment");
        }
#endif
#ifdef NPY_NDARRAYOBJECT_H
        /// Construct from NumPy matrix.
        /// Validates: 2-D shape, element size, row alignment, and that the
        /// stride covers a full row.
        MatrixProxy (PyArrayObject *obj) {
            if (!obj || (obj->nd != 2)) throw invalid_argument("bad array shape");
            rows = obj->dimensions[0];
            cols = obj->dimensions[1];
            stride = obj->strides[0];
            data = reinterpret_cast<uint8_t const *>(obj->data);
            if (obj->descr->elsize != sizeof(DATA_TYPE)) throw invalid_argument("bad data type size");
            if (stride % A) throw invalid_argument("bad alignment");
            if (!(stride >= cols * sizeof(DATA_TYPE))) throw invalid_argument("bad stride");
        }
#endif
#endif
        unsigned size () const {
            return rows;
        }
        unsigned dim () const {
            return cols;
        }
        DATA_TYPE const *operator [] (unsigned i) const {
            return reinterpret_cast<DATA_TYPE const *>(data + stride * i);
        }
        // NOTE(review): casts away const on data the proxy only holds a
        // const view of; writing through this is only safe when the adapted
        // matrix is itself mutable.
        DATA_TYPE *operator [] (unsigned i) {
            return const_cast<DATA_TYPE *>(reinterpret_cast<DATA_TYPE const *>(data + stride * i));
        }
    };

    /// Oracle for Matrix or MatrixProxy.
    /** DATA_TYPE can be Matrix or MatrixProxy,
    * DIST_TYPE should be one class within the namespace kgraph.metric.
    */
    // NOTE(review): the base classes kgraph::IndexOracle / kgraph::SearchOracle
    // are not declared in this header -- include kgraph.h before this file.
    template <typename DATA_TYPE, typename DIST_TYPE>
    class MatrixOracle: public kgraph::IndexOracle {
        MatrixProxy<DATA_TYPE> proxy;
    public:
        /// Per-query oracle: distance from a fixed query vector to row i.
        class SearchOracle: public kgraph::SearchOracle {
            MatrixProxy<DATA_TYPE> proxy;
            DATA_TYPE const *query;
        public:
            SearchOracle (MatrixProxy<DATA_TYPE> const &p, DATA_TYPE const *q): proxy(p), query(q) {
            }
            virtual unsigned size () const {
                return proxy.size();
            }
            virtual float operator () (unsigned i) const {
                return DIST_TYPE::apply(proxy[i], query, proxy.dim());
            }
        };
        template <typename MATRIX_TYPE>
        MatrixOracle (MATRIX_TYPE const &m): proxy(m) {
        }
        virtual unsigned size () const {
            return proxy.size();
        }
        /// Pairwise distance between rows i and j.
        virtual float operator () (unsigned i, unsigned j) const {
            return DIST_TYPE::apply(proxy[i], proxy[j], proxy.dim());
        }
        /// Build a SearchOracle bound to the given query vector.
        SearchOracle query (DATA_TYPE const *query) const {
            return SearchOracle(proxy, query);
        }
    };

    /// Average recall@K of `result` against ground truth `gs`.
    /// Rows are expected to hold DISTANCES sorted ascending; matching is done
    /// by comparing distance values, and a result distance smaller than the
    /// ground-truth distance is treated as numeric instability (throws).
    inline float AverageRecall (Matrix<float> const &gs, Matrix<float> const &result, unsigned K = 0) {
        if (K == 0) {
            K = result.dim();
        }
        if (!(gs.dim() >= K)) throw std::invalid_argument("gs.dim() >= K");
        if (!(result.dim() >= K)) throw std::invalid_argument("result.dim() >= K");
        if (!(gs.size() >= result.size())) throw std::invalid_argument("gs.size() > result.size()");
        float sum = 0;
        for (unsigned i = 0; i < result.size(); ++i) {
            float const *gs_row = gs[i];
            float const *re_row = result[i];
            // compare: two-pointer merge over the sorted distance lists,
            // counting exact distance matches as hits.
            unsigned found = 0;
            unsigned gs_n = 0;
            unsigned re_n = 0;
            while ((gs_n < K) && (re_n < K)) {
                if (gs_row[gs_n] < re_row[re_n]) {
                    ++gs_n;
                }
                else if (gs_row[gs_n] == re_row[re_n]) {
                    ++found;
                    ++gs_n;
                    ++re_n;
                }
                else {
                    throw std::runtime_error("distance is unstable");
                }
            }
            sum += float(found) / K;
        }
        return sum / result.size();
    }

}

// Swap the generic scalar kernels for SIMD specializations where available.
// The AVX specialization is currently disabled (#if 0); SSE2 builds override
// apply/dot/norm2 for float and apply for uint8_t.
#ifndef KGRAPH_NO_VECTORIZE
#ifdef __GNUC__
#ifdef __AVX__
#if 0
namespace kgraph { namespace metric {
        template <>
        inline float l2sqr::apply<float> (float const *t1, float const *t2, unsigned dim) {
            return float_l2sqr_avx(t1, t2, dim);
        }
}}
#endif
#else
#ifdef __SSE2__
namespace kgraph { namespace metric {
        template <>
        inline float l2sqr::apply<float> (float const *t1, float const *t2, unsigned dim) {
            return float_l2sqr_sse2(t1, t2, dim);
        }
        template <>
        inline float l2sqr::dot<float> (float const *t1, float const *t2, unsigned dim) {
            return float_dot_sse2(t1, t2, dim);
        }
        template <>
        inline float l2sqr::norm2<float> (float const *t1, unsigned dim) {
            return float_l2sqr_sse2(t1, dim);
        }
        template <>
        inline float l2sqr::apply<uint8_t> (uint8_t const *t1, uint8_t const *t2, unsigned dim) {
            return uint8_l2sqr_sse2(t1, t2, dim);
        }
}}
#endif
#endif
#endif
#endif

#endif
grad.c
/* Author: Mohammed Al Farhan Email: mohammed.farhan@kaust.edu.sa */ #include <stdio.h> #include <string.h> #include <stdint.h> #include <omp.h> #include "inc/ktime.h" #include "inc/geometry.h" #include "inc/ker/phy.h" /* Calculates the residual */ void compute_grad(struct grad *restrict grad) { struct ktime ktime; setktime(&ktime); const size_t bsz = grad->bsz; const size_t dofs = grad->dofs; const uint32_t *restrict ie = grad->ie; const uint32_t *restrict part = grad->part; const uint32_t *restrict n0 = grad->n0; const uint32_t *restrict n1 = grad->n1; const double *restrict q = grad->q; const double *restrict w0termsx = grad->w0termsx; const double *restrict w0termsy = grad->w0termsy; const double *restrict w0termsz = grad->w0termsz; const double *restrict w1termsx = grad->w1termsx; const double *restrict w1termsy = grad->w1termsy; const double *restrict w1termsz = grad->w1termsz; double *restrict gradx0 = grad->gradx0; double *restrict gradx1 = grad->gradx1; double *restrict gradx2 = grad->gradx2; memset(gradx0, 0, dofs * sizeof(double)); memset(gradx1, 0, dofs * sizeof(double)); memset(gradx2, 0, dofs * sizeof(double)); __assume_aligned(gradx0, 64); __assume_aligned(gradx1, 64); __assume_aligned(gradx2, 64); /* Calculates the gradients at the nodes using weighted least squares This solves using Gram-Schmidt */ #pragma omp parallel { const uint32_t t = omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const uint32_t idx0 = bsz * node0; const uint32_t idx1 = bsz * node1; double dq; double termx; double termy; double termz; if(part[node0] == t) { termx = w0termsx[i]; termy = w0termsy[i]; termz = w0termsz[i]; dq = q[idx1 + 0] - q[idx0 + 0]; gradx0[idx0 + 0] += termx * dq; gradx1[idx0 + 0] += termy * dq; gradx2[idx0 + 0] += termz * dq; dq = q[idx1 + 1] - q[idx0 + 1]; gradx0[idx0 + 1] += termx * dq; gradx1[idx0 + 1] += termy * dq; 
gradx2[idx0 + 1] += termz * dq; dq = q[idx1 + 2] - q[idx0 + 2]; gradx0[idx0 + 2] += termx * dq; gradx1[idx0 + 2] += termy * dq; gradx2[idx0 + 2] += termz * dq; dq = q[idx1 + 3] - q[idx0 + 3]; gradx0[idx0 + 3] += termx * dq; gradx1[idx0 + 3] += termy * dq; gradx2[idx0 + 3] += termz * dq; } if(part[node1] == t) { termx = w1termsx[i]; termy = w1termsy[i]; termz = w1termsz[i]; dq = q[idx0 + 0] - q[idx1 + 0]; gradx0[idx1 + 0] += termx * dq; gradx1[idx1 + 0] += termy * dq; gradx2[idx1 + 0] += termz * dq; dq = q[idx0 + 1] - q[idx1 + 1]; gradx0[idx1 + 1] += termx * dq; gradx1[idx1 + 1] += termy * dq; gradx2[idx1 + 1] += termz * dq; dq = q[idx0 + 2] - q[idx1 + 2]; gradx0[idx1 + 2] += termx * dq; gradx1[idx1 + 2] += termy * dq; gradx2[idx1 + 2] += termz * dq; dq = q[idx0 + 3] - q[idx1 + 3]; gradx0[idx1 + 3] += termx * dq; gradx1[idx1 + 3] += termy * dq; gradx2[idx1 + 3] += termz * dq; } } } compute_time(&ktime, grad->t); }
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // use the id(...) matcher around the match expressions that match the nodes // you want to access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(id("child", recordDecl()))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the id(...) calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. 
/// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::ast_type_traits::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// If the provided matcher matches a node, binds the node to \c ID. /// /// FIXME: Do we want to support this now that we have bind()? template <typename T> internal::Matcher<T> id(StringRef ID, const internal::BindableMatcher<T> &InnerMatcher) { return InnerMatcher.bind(ID); } /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. 
This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. 
/// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. 
/// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. 
extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches public C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPublic()) /// matches 'int a;' AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; } /// Matches protected C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isProtected()) /// matches 'int b;' AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; } /// Matches private C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPrivate()) /// matches 'int c;' AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. 
/// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). 
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. 
/// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. 
/// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. 
/// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. /// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. 
/// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. /// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. 
/// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. 
/// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. 
/// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. /// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. 
/// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. 
/// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. 
/// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. 
/// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. /// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. 
/// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. 
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. 
/// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches array subscript expressions. 
/// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. 
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. 
/// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. 
/// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). /// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. 
/// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. /// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. 
/// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. /// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. 
/// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::Matcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::Matcher<Stmt> sizeOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(ofKind(UETT_SizeOf), InnerMatcher))); } /// Matches NamedDecl nodes that have the specified name. /// /// Supports specifying enclosing namespaces or classes by prefixing the name /// with '<enclosing>::'. /// Does not match typedefs of an underlying type with the given name. /// /// Example matches X (Name == "X") /// \code /// class X; /// \endcode /// /// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X") /// \code /// namespace a { namespace b { class X; } } /// \endcode inline internal::Matcher<NamedDecl> hasName(const std::string &Name) { return internal::Matcher<NamedDecl>(new internal::HasNameMatcher({Name})); } /// Matches NamedDecl nodes that have any of the specified names. /// /// This matcher is only provided as a performance optimization of hasName. /// \code /// hasAnyName(a, b, c) /// \endcode /// is equivalent to, but faster than /// \code /// anyOf(hasName(a), hasName(b), hasName(c)) /// \endcode extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef, internal::hasAnyNameFunc> hasAnyName; /// Matches NamedDecl nodes whose fully qualified names contain /// a substring matched by the given RegExp. /// /// Supports specifying enclosing namespaces or classes by /// prefixing the name with '<enclosing>::'. Does not match typedefs /// of an underlying type with the given name. /// /// Example matches X (regexp == "::X") /// \code /// class X; /// \endcode /// /// Example matches X (regexp is one of "::X", "^foo::.*X", among others) /// \code /// namespace foo { namespace bar { class X; } } /// \endcode AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) { assert(!RegExp.empty()); std::string FullNameString = "::" + Node.getQualifiedNameAsString(); llvm::Regex RE(RegExp); return RE.match(FullNameString); } /// Matches overloaded operator names. 
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, StringRef, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name); } /// Matches C++ classes that are directly or indirectly derived from /// a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_MATCHER_P(CXXRecordDecl, isDerivedFrom, internal::Matcher<NamedDecl>, Base) { return Finder->classIsDerivedFrom(&Node, Base, Builder); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). 
AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isDerivedFrom, std::string, BaseName, 1) { assert(!BaseName.empty()); return isDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom, internal::Matcher<NamedDecl>, Base, 0) { return Matcher<CXXRecordDecl>(anyOf(Base, isDerivedFrom(Base))) .matches(Node, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). AST_MATCHER_P_OVERLOAD(CXXRecordDecl, isSameOrDerivedFrom, std::string, BaseName, 1) { assert(!BaseName.empty()); return isSameOrDerivedFrom(hasName(BaseName)).matches(Node, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, Builder); } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. 
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<ValueDecl> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>, InnerMatcher) { if (Node.isNull()) return false; return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder); } /// Overloaded to match the referenced type's declaration. AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>, InnerMatcher, 1) { return references(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches on the implicit object argument of a member call expression. Unlike /// `on`, matches the argument directly without stripping away anything. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y { void g(); }; /// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); } /// \endcode /// cxxMemberCallExpr(onImplicitObjectArgument(hasType( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`. /// cxxMemberCallExpr(on(callExpr())) /// does not match `(g()).m()`, because the parens are not ignored. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the type of the expression's implicit object argument either /// matches the InnerMatcher, or is a pointer to a type that matches the /// InnerMatcher. /// /// Given /// \code /// class Y { public: void m(); }; /// class X : public Y { void g(); }; /// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); } /// \endcode /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `p->m()` and `x.m()`. /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("X"))))) /// matches `x.g()`. 
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. 
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. /// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. 
AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, ParamMatcher)))), callExpr(callee(functionDecl( hasParameter(ParamIndex, ParamMatcher)))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; } } ++ParamIndex; } *Builder = std::move(Result); return Matched; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. 
/// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. 
/// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. 
if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. 
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = ast_type_traits::DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. 
AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. 
/// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. /// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. 
Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). /// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P(hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator), std::string, Name) { return Name == Node.getOpcodeStr(Node.getOpcode()); } /// Matches all kinds of assignment operators. 
/// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; }) /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches RecordDecl object that are spelled with "struct." /// /// Example matches S, but not C or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isStruct) { return Node.isStruct(); } /// Matches RecordDecl object that are spelled with "union." /// /// Example matches U, but not C or S. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isUnion) { return Node.isUnion(); } /// Matches RecordDecl object that are spelled with "class." /// /// Example matches C, but not S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// \endcode AST_MATCHER(RecordDecl, isClass) { return Node.isClass(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? 
a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? 
/// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. 
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. 
/// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. 
/// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). 
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
/// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. 
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. 
/// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. 
/// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. 
/// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. 
/// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. 
/// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. 
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). 
/// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. 
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  return anyOf(
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}

/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
///   std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///   return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<ast_type_traits::DynTypedNode, 8> Stack(Parents.begin(),
                                                            Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(intgerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor. /// /// In C++17 copy elidable constructors are no longer being /// generated in the AST as it is not permitted by the standard. 
They are /// however part of the AST in C++14 and earlier. Therefore, to write a matcher /// that works in all language modes, the matcher has to skip elidable /// constructor AST nodes if they appear in the AST. This matcher can be used to /// skip those elidable constructors. /// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(any( /// ignoringElidableConstructorCall(callExpr()), /// exprWithCleanups(ignoringElidableConstructorCall(callExpr()))))`` /// matches ``H D = G()`` AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(&Node)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->GetTemporaryExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. 
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the Stmt AST node that is marked as being the structured-block /// of an OpenMP executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// \endcode /// /// ``stmt(isOMPStructuredBlock()))`` matches ``{}``. AST_MATCHER(Stmt, isOMPStructuredBlock) { return Node.isOMPStructuredBlock(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. 
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == OMPC_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == OMPC_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
GB_unop__identity_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__(none))
// op(A') function:  GB (_unop_tran__identity_uint8_uint8)

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity, so the "operation" is a plain copy
#define GB_OP(z, x) \
    z = x ;

// casting: input and output types are both uint8_t, so no conversion occurs
#define GB_CAST(z, aij) \
    uint8_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint8_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = aij ;               \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// NOTE(review): the apply kernel is compiled out (#if 0) for this operator;
// presumably the identity op with identical input/output types is handled by
// a plain memcpy elsewhere — the generator names it "(none)" accordingly.
#if 0

GrB_Info GB (_unop_apply__(none))
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint8_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint8_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is supplied by the shared template, which
    // expands the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pairwise_transform.h
/*
 * pairwise_transform.h
 *
 *  Created on: Dec 28, 2015
 *      Author: agibsonccc
 */

#ifndef PAIRWISE_TRANSFORM_H_
#define PAIRWISE_TRANSFORM_H_

#ifdef _OPENMP
#include <omp.h>
#endif
#include <templatemath.h>
#include <helper_cuda.h>
#include <helpers/shape.h>
#include <pairwise_util.h>
#include <dll.h>
#include <stdio.h>
#include <ops/ops.h>
#include <op_boilerplate.h>

#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#endif

// Serial fallbacks so the OpenMP thread-id/thread-count calls below compile
// when OpenMP is unavailable.
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif

#include "legacy_ops.h"

namespace functions {
    namespace pairwise_transforms {

/**
 * Transforms involving 2 arrays.
 *
 * Element-wise binary operations result[i] = op(x[i], y[i], extraParams),
 * dispatched by op number over PAIRWISE_TRANSFORM_OPS.  Host-side execution
 * picks a strategy based on strides/shapes; CUDA declarations are provided
 * for device builds.
 */
        template<typename T>
        class PairWiseTransform {
        public:

#ifdef __CUDACC__
            // Device/host entry points; definitions live in the CUDA
            // implementation files.
            static __host__ void execudaCudaStrided(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, T *dx, int xStride, T *y, int yStride, T *result, int resultStride, T *extraParams, Nd4jIndex n);

            static __host__ void execudaCudaShaped(dim3& launchDims, Nd4jPointer *extraPointers, int opNum, T *dx, int *xShapeInfo, T *y, int *yShapeInfo, T *result, int *resultShapeInfo, T *extraParams);

            static __device__ void transformCuda(const int opNum, Nd4jIndex n, T *dx, T *y, int incx, int incy, T *extraParams, T *result, int incz, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo);

            static __device__ void transformCuda(const int opNum, T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo);

            static __device__ void transformCuda(const int opNum, T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo);

            template<typename OpType>
            static __device__ void transformCuda(T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo);

            template<typename OpType>
            static __device__ void transformCuda(Nd4jIndex n, T *dx, T *dy, int incx, int incy, T *params, T *result, int incz, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo);

            template<typename OpType>
            static __device__ void transform(T *dx, int *xShapeBuffer, T *y, int *yShapeBuffer, T *result, int *resultShapeBuffer, T *extraParams, int *indexes, int *yIndexes, int *resultIndexes, int *allocationPointer, UnifiedSharedMemory *manager, int *tadOnlyShapeInfo);
#endif
        public:

            // Runtime dispatch by op number: gather/scatter (explicit index)
            // variant.
            static void exec(
                const int opNum,
                T *dx,
                int *xShapeBuffer,
                T *y,
                int *yShapeBuffer,
                T *result,
                int *resultShapeBuffer,
                T *extraParams,
                int *indexes,
                int *yIndexes,
                int *resultIndexes) {
                DISPATCH_BY_OPNUM(exec, PARAMS(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams, indexes, yIndexes, resultIndexes), PAIRWISE_TRANSFORM_OPS);
            }

            // Runtime dispatch by op number: shape-buffer variant.
            static void exec(
                const int opNum,
                T *dx,
                int *xShapeBuffer,
                T *y,
                int *yShapeBuffer,
                T *result,
                int *resultShapeBuffer,
                T *extraParams) {
                DISPATCH_BY_OPNUM(exec, PARAMS(dx, xShapeBuffer, y, yShapeBuffer, result, resultShapeBuffer, extraParams), PAIRWISE_TRANSFORM_OPS);
            }

            // Runtime dispatch by op number: flat strided variant.
            static void exec(
                const int opNum,
                T *dx,
                Nd4jIndex xStride,
                T *y,
                Nd4jIndex yStride,
                T *result,
                Nd4jIndex resultStride,
                T *extraParams,
                Nd4jIndex n) {
                DISPATCH_BY_OPNUM(exec, PARAMS(dx, xStride, y, yStride, result, resultStride, extraParams, n), PAIRWISE_TRANSFORM_OPS);
            }

            /**
             * Gather/scatter form: result[resultIndexes[i]] =
             * op(dx[indexes[i]], y[yIndexes[i]]).  n is taken from the x
             * shape buffer; all three index arrays must have at least n
             * entries.  Not safe if resultIndexes contains duplicates
             * (parallel writes would race).
             */
            template<typename OpType>
            static void exec(
                T *dx,
                int *xShapeBuffer,
                T *y,
                int *yShapeBuffer,
                T *result,
                int *resultShapeBuffer,
                T *extraParams,
                int *indexes,
                int *yIndexes,
                int *resultIndexes) {
                Nd4jIndex n = shape::length(xShapeBuffer);

#pragma omp parallel for simd schedule(guided) proc_bind(AFFINITY) default(shared)
                for (Nd4jIndex i = 0; i < n; i++) {
                    result[resultIndexes[i]] = OpType::op(dx[indexes[i]], y[yIndexes[i]], extraParams);
                }
            }

            /**
             * Shape-buffer form.  Chooses a strategy:
             *  - y scalar: broadcast y[0] over x (fast path when both x and
             *    result are contiguous, otherwise coordinate-based offsets);
             *  - matching orders/strides: delegate to the flat strided exec;
             *  - same shape, arbitrary strides: iterate per leading-dimension
             *    slice with the raw-array iterator;
             *  - otherwise: full coordinate translation per element.
             */
            template<typename OpType>
            static void exec(
                T *dx,
                int *xShapeBuffer,
                T *y,
                int *yShapeBuffer,
                T *result,
                int *resultShapeBuffer,
                T *extraParams) {
                Nd4jIndex n = shape::length(xShapeBuffer);
                int xElementWiseStride = shape::elementWiseStride(xShapeBuffer);
                int yElementWiseStride = shape::elementWiseStride(yShapeBuffer);
                int resultElementWiseStride = shape::elementWiseStride(resultShapeBuffer);

                if (shape::isScalar(yShapeBuffer)) {
                    // Broadcast scalar y[0] against every element of x.
                    if (xElementWiseStride == 1 && resultElementWiseStride == 1) {
                        for (int e = 0; e < n; e++) {
                            result[e] = OpType::op(dx[e], y[0], extraParams);
                        }
                    } else {
                        int xCoord[MAX_RANK];
                        int resultCoord[MAX_RANK];

                        int xRank = shape::rank(xShapeBuffer);
                        int resultRank = shape::rank(resultShapeBuffer);

                        int *xShape = shape::shapeOf(xShapeBuffer);
                        int *xStride = shape::stride(xShapeBuffer);

                        int *resultShape = shape::shapeOf(resultShapeBuffer);
                        int *resultStride = shape::stride(resultShapeBuffer);

                        // Thread count scales with problem size, capped by
                        // the OpenMP maximum.
                        int elementsPerThread = n / ELEMENT_THRESHOLD;
                        int num_threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
                        num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(xCoord, resultCoord)
                        for (Nd4jIndex i = 0; i < n; i++) {
                            // Translate the linear index into per-array
                            // offsets via C-order coordinates.
                            shape::ind2subC(xRank,xShape, i, xCoord);
                            shape::ind2subC(resultRank,resultShape, i, resultCoord);

                            Nd4jIndex xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                            Nd4jIndex resultOffset = shape::getOffset(0, resultShape, resultStride, resultCoord, resultRank);
                            result[resultOffset] = OpType::op(dx[xOffset], y[0], extraParams);
                        }
                    }

                    return;
                }

                bool sameShape = shape::shapeEquals(shape::rank(xShapeBuffer), shape::shapeOf(xShapeBuffer),
                                                    shape::rank(yShapeBuffer), shape::shapeOf(yShapeBuffer));

                if (xElementWiseStride >= 1 &&
                    yElementWiseStride >= 1 &&
                    resultElementWiseStride >= 1 &&
                    shape::order(xShapeBuffer) == shape::order(yShapeBuffer) &&
                    shape::order(resultShapeBuffer) == shape::order(xShapeBuffer) &&
                    sameShape &&
                    xElementWiseStride == yElementWiseStride) {
                    // Fully compatible layouts: run the flat strided kernel
                    // over all n elements.
                    exec<OpType>(dx,
                                 xElementWiseStride,
                                 y,
                                 yElementWiseStride,
                                 result,
                                 resultElementWiseStride,
                                 extraParams,
                                 n);
                }
                //not same shape
                else if (!sameShape && shape::order(xShapeBuffer) == shape::order(yShapeBuffer) &&
                         shape::order(resultShapeBuffer) == shape::order(xShapeBuffer) && xElementWiseStride >= 1 &&
                         yElementWiseStride >= 1 &&
                         resultElementWiseStride >= 1 && xElementWiseStride == yElementWiseStride) {
                    // Shapes differ but layouts agree: iterate only over y's
                    // length (y is presumably the shorter operand here —
                    // behavior inherited from the original implementation).
                    exec<OpType>(dx,
                                 xElementWiseStride,
                                 y,
                                 yElementWiseStride,
                                 result,
                                 resultElementWiseStride,
                                 extraParams,
                                 shape::length(yShapeBuffer));
                }

                else if (sameShape) {
                    int rank = shape::rank(xShapeBuffer);
                    int *xShape = shape::shapeOf(xShapeBuffer);

                    int *xStride = shape::stride(xShapeBuffer);
                    int *yStride = shape::stride(yShapeBuffer);
                    int *resultStride = shape::stride(resultShapeBuffer);

                    // tad-oriented rotation technically
                    int tadsPerThread = xShape[0] / TAD_THRESHOLD;
                    int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
                    num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) default(shared)
                    for (Nd4jIndex i = 0; i < xShape[0]; i++) {
                        // Each thread handles one slice along the leading
                        // dimension, walking the remaining rank-1 dims with
                        // the raw-array iterator.
                        T *dxLocal = dx + xStride[0] * i;
                        T *yLocal = y + yStride[0] * i;
                        T *resultLocal = result + resultStride[0] * i;

                        int rankLocal = rank - 1;
                        int *xShapeLocal = xShape + 1;

                        int *xStrideLocal = xStride + 1;
                        int *yStrideLocal = yStride + 1;
                        int *resultStrideLocal = resultStride + 1;

                        int shapeIter[MAX_RANK];
                        int coord[MAX_RANK];
                        int dim;
                        int xStridesIter[MAX_RANK];
                        int yStridesIter[MAX_RANK];
                        int resultStridesIter[MAX_RANK];
                        if (PrepareThreeRawArrayIter<T>(rankLocal,
                                                        xShapeLocal,
                                                        dxLocal,
                                                        xStrideLocal,
                                                        yLocal,
                                                        yStrideLocal,
                                                        resultLocal,
                                                        resultStrideLocal,
                                                        rankLocal,
                                                        shapeIter,
                                                        &dxLocal,
                                                        xStridesIter,
                                                        &yLocal,
                                                        yStridesIter,
                                                        &resultLocal,
                                                        resultStridesIter) >= 0) {
                            ND4J_RAW_ITER_START(dim, rankLocal, coord, shapeIter); {
                                // Process the innermost dimension
                                T *xIter = dxLocal;
                                T *yIter = yLocal;
                                T *resultIter = resultLocal;
                                resultIter[0] = OpType::op(xIter[0], yIter[0], extraParams);
                            }
                            ND4J_RAW_ITER_THREE_NEXT(dim,
                                                     rankLocal,
                                                     coord,
                                                     shapeIter,
                                                     dxLocal,
                                                     xStridesIter,
                                                     yLocal,
                                                     yStridesIter,
                                                     resultLocal,
                                                     resultStridesIter);
                        }
                        else {
                            printf("Unable to prepare array\n");
                        }
                    }
                }

                else {
                    // Fallback: full coordinate translation per element for
                    // each of x, y, and result.
                    Nd4jIndex len = n;
                    int xRank = shape::rank(xShapeBuffer);
                    int yRank = shape::rank(yShapeBuffer);
                    int resultRank = shape::rank(resultShapeBuffer);

                    int *xShape = shape::shapeOf(xShapeBuffer);
                    int *xStride = shape::stride(xShapeBuffer);

                    int *yShape = shape::shapeOf(yShapeBuffer);
                    int *yStride = shape::stride(yShapeBuffer);

                    int *resultShape = shape::shapeOf(resultShapeBuffer);
                    int *resultStride = shape::stride(resultShapeBuffer);

                    int elementsPerThread = n / ELEMENT_THRESHOLD;
                    int num_threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
                    num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

                    int xCoord[MAX_RANK];
                    int yCoord[MAX_RANK];

                    if(dx == result) {
                        // In-place: x offsets double as result offsets, so no
                        // separate result coordinate is computed.
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(xCoord, yCoord)
                        for (Nd4jIndex i = 0; i < len; i++) {
                            shape::ind2subC(xRank,xShape, i, xCoord);
                            shape::ind2subC(yRank,yShape, i, yCoord);

                            Nd4jIndex xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                            Nd4jIndex yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);
                            result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
                        }
                    } else {
                        int resultCoord[MAX_RANK];

#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared) private(xCoord, yCoord, resultCoord)
                        for (Nd4jIndex i = 0; i < len; i++) {
                            shape::ind2subC(xRank,xShape, i, xCoord);
                            shape::ind2subC(yRank,yShape, i, yCoord);
                            shape::ind2subC(resultRank,resultShape, i, resultCoord);

                            Nd4jIndex xOffset = shape::getOffset(0, xShape, xStride, xCoord, xRank);
                            Nd4jIndex yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank);
                            Nd4jIndex resultOffset = shape::getOffset(0, resultShape, resultStride, resultCoord, resultRank);
                            result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
                        }
                    }
                }
            }

            /**
             * Flat strided form: result[i*resultStride] =
             * op(dx[i*xStride], y[i*yStride]) for i in [0, n).  Work is
             * split into fixed spans per thread (span has +8 slack so the
             * last thread covers the remainder; the end is clamped to n),
             * with an inner SIMD loop.  Unit-stride inputs take a dedicated
             * branch so the compiler can vectorize direct indexing.
             */
            template<typename OpType>
            static void exec(T *dx,
                             Nd4jIndex xStride,
                             T *y,
                             Nd4jIndex yStride,
                             T *result,
                             Nd4jIndex resultStride,
                             T *extraParams,
                             const Nd4jIndex n) {
                int elementsPerThread = n / ELEMENT_THRESHOLD;
                int _threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
                _threads = nd4j::math::nd4j_min<int>(_threads, omp_get_max_threads());

                int span = (n / _threads) + 8;

                if (xStride == 1 && yStride == 1 && resultStride == 1) {
                    if (_threads > 1) {
#pragma omp parallel num_threads(_threads) if (_threads>1) proc_bind(AFFINITY) default(shared)
                        {
                            Nd4jIndex tid = omp_get_thread_num();
                            Nd4jIndex start = span * tid;
                            Nd4jIndex end = span * (tid + 1);
                            if (end > n) end = n;

#pragma omp simd
                            for (Nd4jIndex i = start; i < end; i++) {
                                result[i] = OpType::op(dx[i], y[i], extraParams);
                            }
                        }
                    } else {
#pragma omp simd
                        for (Nd4jIndex i = 0; i < n; i++) {
                            result[i] = OpType::op(dx[i], y[i], extraParams);
                        }
                    }
                }
                else {
                    if (_threads > 1) {
#pragma omp parallel num_threads(_threads) if (_threads>1) proc_bind(AFFINITY) default(shared)
                        {
                            Nd4jIndex tid = omp_get_thread_num();
                            Nd4jIndex start = span * tid;
                            Nd4jIndex end = span * (tid + 1);
                            if (end > n) end = n;

#pragma omp simd
                            for (Nd4jIndex i = start; i < end; i++) {
                                result[i * resultStride] = OpType::op(dx[i * xStride], y[i * yStride], extraParams);
                            }
                        }
                    } else {
#pragma omp simd
                        for (Nd4jIndex i = 0; i < n; i++) {
                            result[i * resultStride] = OpType::op(dx[i * xStride], y[i * yStride], extraParams);
                        }
                    }
                }
            }
        };
    }
}

#endif /* PAIRWISE_TRANSFORM_H_ */
omp_sections_nowait.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" /* * This test will hang if the nowait is not working properly * * It relies on a thread skipping to the second sections construct to * release the threads in the first sections construct * * Also, since scheduling of sections is implementation defined, it is * necessary to have all four sections in the second sections construct * release the threads since we can't guarantee which section a single thread * will execute. */ volatile int release; volatile int count; void wait_for_release_then_increment(int rank) { fprintf(stderr, "Thread nr %d enters first section" " and waits.\n", rank); while (release == 0); #pragma omp atomic count++; } void release_and_increment(int rank) { fprintf(stderr, "Thread nr %d sets release to 1\n", rank); release = 1; #pragma omp flush(release) #pragma omp atomic count++; } int test_omp_sections_nowait() { release = 0; count = 0; #pragma omp parallel num_threads(4) { int rank; rank = omp_get_thread_num (); #pragma omp sections nowait { #pragma omp section { wait_for_release_then_increment(rank); } #pragma omp section { wait_for_release_then_increment(rank); } #pragma omp section { wait_for_release_then_increment(rank); } #pragma omp section { fprintf(stderr, "Thread nr %d enters first sections and goes " "immediately to next sections construct to release.\n", rank); #pragma omp atomic count++; } } /* Begin of second sections environment */ #pragma omp sections { #pragma omp section { release_and_increment(rank); } #pragma omp section { release_and_increment(rank); } #pragma omp section { release_and_increment(rank); } #pragma omp section { release_and_increment(rank); } } } // Check to make sure all eight sections were executed return (count==8); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_sections_nowait()) { num_failed++; } } return num_failed; }
GB_binop__ge_bool.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ge_bool)
// A.*B function (eWiseMult):       GB (_AemultB_08__ge_bool)
// A.*B function (eWiseMult):       GB (_AemultB_02__ge_bool)
// A.*B function (eWiseMult):       GB (_AemultB_04__ge_bool)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ge_bool)
// A*D function (colscale):         GB (_AxD__ge_bool)
// D*A function (rowscale):         GB (_DxB__ge_bool)
// C+=B function (dense accum):     GB (_Cdense_accumB__ge_bool)
// C+=b function (dense accum):     GB (_Cdense_accumb__ge_bool)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ge_bool)
// C=scalar+B                       GB (_bind1st__ge_bool)
// C=scalar+B'                      GB (_bind1st_tran__ge_bool)
// C=A+scalar                       GB (_bind2nd__ge_bool)
// C=A'+scalar                      GB (_bind2nd_tran__ge_bool)

// C type:   bool
// A type:   bool
// A pattern? 0
// B type:   bool
// B pattern? 0

// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    bool

#define GB_BTYPE \
    bool

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    bool aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    bool bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_BOOL || GxB_NO_GE_BOOL)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NOTE(review): compiled out for GE: the accumulating dense ewise3 kernel
// only applies to the ops listed below, so the generator emits "(none)".
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ge_bool)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type bool
        bool bwork = (*((bool *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): generator artifact — this second return is unreachable
    // (the block above always returns); kept as emitted.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ge_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only defined for eWiseUnion (values used for entries
    // present in only one of A or B)
    bool alpha_scalar ;
    bool beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((bool *) alpha_scalar_in)) ;
        beta_scalar  = (*((bool *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ge_bool)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ge_bool)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ge_bool)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    bool   x = (*((bool *) x_input)) ;
    bool *Bx = (bool *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        bool bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ge_bool)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    bool *Ax = (bool *) Ax_input ;
    bool   y = (*((bool *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        bool aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    bool aij = GBX (Ax, pA, false) ;    \
    Cx [pC] = (x >= aij) ;              \
}

GrB_Info GB (_bind1st_tran__ge_bool)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        bool
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    bool aij = GBX (Ax, pA, false) ;    \
    Cx [pC] = (aij >= y) ;              \
}

GrB_Info GB (_bind2nd_tran__ge_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
block_matrix.h
#ifndef _BLOCK_MATRIX_H #define _BLOCK_MATRIX_H #include <Eigen/Dense> #include <Eigen/Sparse> namespace BlockMatrix { // Types template<class Type, int RowsSize, int ColsSize> class Matrix : public Eigen::Matrix<Type, RowsSize, ColsSize> { protected: Eigen::VectorXi rows_size_; Eigen::VectorXi cols_size_; public: Eigen::VectorXi getRowsSize() const { return rows_size_; } int getRowsSize(int row_i) const { return rows_size_(row_i); } public: Eigen::VectorXi getColsSize() const { return cols_size_; } int getColsSize(int col_i) const { return cols_size_(col_i); } public: int getTotalRowsSize() const { int total_rows_size=0; for(int i=0; i<rows_size_.rows(); i++) total_rows_size+=rows_size_(i); return total_rows_size; } public: int getTotalColsSize() const { int total_cols_size=0; for(int i=0; i<cols_size_.rows(); i++) total_cols_size+=cols_size_(i); return total_cols_size; } protected: public: int analyse() { // Resize and init to zero -> Not needed. Done when resize() // rows_size_.resize(this->rows()); // rows_size_.setZero(); // cols_size_.resize(this->cols()); // cols_size_.setZero(); // special case 1: this->cols() == 0 if( this->cols() == 0 && this->rows() != 0 ) { return 0; } // special case 2: this->rows() == 0 if(this->rows() == 0 && this->cols() != 0) { return 0; } // special case 3: rows()==0 && cols()==0 if(this->rows() == 0 && this->cols() == 0) { return 0; } // Normal cases // rows for(int i=0; i<this->rows(); i++) { int row_i_dimension=0; for(int j=0; j<this->cols(); j++) { if(this->operator()(i,j).rows() == 0) continue; if(row_i_dimension == 0) { row_i_dimension=this->operator()(i,j).rows(); } if(row_i_dimension != this->operator()(i,j).rows()) { return -1; } } rows_size_(i)=row_i_dimension; } // cols for(int j=0; j<this->cols(); j++) { int col_j_dimension=0; for(int i=0; i<this->rows(); i++) { if(this->operator()(i,j).cols() == 0) continue; if(col_j_dimension == 0) { col_j_dimension=this->operator()(i,j).cols(); } if(col_j_dimension != 
this->operator()(i,j).cols()) { return -1; } } cols_size_(j)=col_j_dimension; } // End return 0; } public: int createFromEigen(const Type& mat_in, const Eigen::VectorXi& rows_size, const Eigen::VectorXi& cols_size) { // Resize this->resize(rows_size, cols_size); // Checks if( getTotalRowsSize() != mat_in.rows() || getTotalColsSize() != mat_in.cols() ) throw; // Fill blocks int num_rows=rows_size.rows(); int num_cols=cols_size.rows(); int dimension_row_i=0; for(int row=0; row<num_rows; row++) { int dimension_col_i=0; for(int col=0; col<num_cols; col++) { this->operator()(row, col)=mat_in.block(dimension_row_i, dimension_col_i, rows_size_(row, 0), cols_size_(col, 0)); dimension_col_i+=cols_size_(col, 0); } dimension_row_i+=rows_size_(row, 0); } // Analyse if(this->analyse()) throw; // End return 0; } int createFromEigen(const Type& mat_in, const Eigen::VectorXi& rows_size) { Eigen::VectorXi cols_size(1); cols_size(0)=1; int error=createFromEigen(mat_in, rows_size, cols_size); if(error) return error; // End return 0; } void resize(int num_rows, int num_cols) { if(num_rows < 0 || num_cols < 0) throw; // Resize Block Data Matrix Eigen::Matrix<Type, Eigen::Dynamic, Eigen::Dynamic>::resize(num_rows, num_cols); // Resize vectors for rows_size and cols_size rows_size_.resize(num_rows); cols_size_.resize(num_cols); // Set zero rows_size_.setZero(); cols_size_.setZero(); // Resize each block -> Not Needed? 
/* for(int row=0; row<num_rows; row++) { for(int col=0; col<num_cols; col++) { this->operator()(row, col).resize(0, 0); } } */ return; } void resize(const Eigen::VectorXi& rows_size, const Eigen::VectorXi& cols_size) { // Checks if(rows_size.rows() == 0 || cols_size.rows() == 0) throw; // Reset rows_size_=rows_size; cols_size_=cols_size; // Resize Block Data Matrix int num_rows=rows_size_.rows(); int num_cols=cols_size_.rows(); Eigen::Matrix<Type, Eigen::Dynamic, Eigen::Dynamic>::resize(num_rows, num_cols); // Resize each blocks #pragma omp parallel for for(int row=0; row<num_rows; row++) { #pragma omp parallel for for(int col=0; col<num_cols; col++) { this->operator()(row, col).resize(rows_size_(row,0), cols_size_(col,0)); } } // End return; } Matrix& operator=(const Matrix& other) { // protect against invalid self-assignment if (this != &other) { // Resize this->resize(other.rows(), other.cols()); // Copy Values of each block #pragma omp parallel for for(int row=0; row<this->rows(); row++) { #pragma omp parallel for for(int col=0; col<this->cols(); col++) { this->operator()(row,col)=other(row,col); } } // Analyse if(this->analyse()) throw; } // by convention, always return *this return *this; } Matrix transpose() { Matrix out; // Resize out.resize(this->cols(), this->rows()); // Copy Values of each block #pragma omp parallel for for(int row=0; row<this->rows(); row++) { #pragma omp parallel for for(int col=0; col<this->cols(); col++) { // Resize block out(col,row).resize(this->operator()(row, col).cols(), this->operator()(row, col).rows()); // Set block transposed if(this->operator()(row, col).cols()>0 && this->operator()(row, col).rows()>0) { out(col,row)=this->operator()(row, col).transpose(); } } } // Analyse out if(out.analyse()) throw; return out; } /* bool operator()() { return true; } */ Matrix operator+(const Matrix& sum1) { Matrix sum_result; // Check big sizes if(sum1.cols() != this->cols()) throw; if(sum1.rows() != this->rows()) throw; // Check blocks 
sizes //Check if(this->getColsSize() != sum1.getColsSize()) throw; if(this->getRowsSize() != sum1.getRowsSize()) throw; // Everything is fine. // Resize the result sum_result.resize(this->rows(), this->cols()); // Sum the blocks #pragma omp parallel for for(int i=0; i<this->rows(); i++) { #pragma omp parallel for for(int j=0; j<this->cols(); j++) { // Two blocks non zero if( ( this->operator()(i,j).cols() != 0 && this->operator()(i,j).rows() != 0 ) && ( sum1(i,j).cols() != 0 && sum1(i,j).rows() != 0 ) ) { // Resize sum_result(i,j).resize(sum1(i,j).rows(), sum1(i,j).cols()); // Sum sum_result(i,j)=sum1(i,j)+this->operator()(i,j); continue; } // this(i,j) block zero else if( ( this->operator()(i,j).cols() == 0 || this->operator()(i,j).rows() == 0 ) && ( sum1(i,j).cols() != 0 && sum1(i,j).rows() != 0 ) ) { // Resize sum_result(i,j).resize(sum1(i,j).rows(), sum1(i,j).cols()); // Sum sum_result(i,j)=sum1(i,j); } // sum1(i,j) block zero else if( ( this->operator()(i,j).cols() != 0 && this->operator()(i,j).rows() != 0 ) && ( sum1(i,j).cols() == 0 || sum1(i,j).rows() == 0 ) ) { // Resize sum_result(i,j).resize(this->operator()(i,j).rows(), this->operator()(i,j).cols()); // Sum sum_result(i,j)=this->operator()(i,j); } // The two blocks zero else { // Do nothing continue; } } } // Analyse the result if(sum_result.analyse()) throw; // End return sum_result; } Matrix& operator+=(const Matrix& sum1) { *this = *this + sum1; return *this; // Operator+() //return this->operator+(sum1); } }; using MatrixSparse = Matrix<Eigen::SparseMatrix<double>, Eigen::Dynamic, Eigen::Dynamic>; /* class MatrixSparse : public Matrix<Eigen::SparseMatrix<double>, Eigen::Dynamic, Eigen::Dynamic> { }; */ using MatrixDense = Matrix<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>, Eigen::Dynamic, Eigen::Dynamic>; /* class MatrixDense : public Matrix<Eigen::MatrixXd, Eigen::Dynamic, Eigen::Dynamic> { }; class VectorSparse : public Matrix<Eigen::SparseMatrix<double>, Eigen::Dynamic, 1> { }; class 
VectorDense : public Matrix<Eigen::MatrixXd, Eigen::Dynamic, 1> { }; */ MatrixSparse operator*(const MatrixSparse &prod1, const MatrixSparse &prod2); MatrixDense operator*(const MatrixDense &prod1, const MatrixSparse &prod2); MatrixDense operator*(const MatrixSparse &prod1, const MatrixDense &prod2); //MatrixSparse operator+(MatrixSparse &sum1, MatrixSparse &sum2); MatrixDense operator+(const MatrixDense &sum1, const MatrixSparse &sum2); MatrixDense operator+(const MatrixSparse &sum1, const MatrixDense &sum2); /* template<class TypeResult, class TypeProd1, class TypeProd2> TypeResult operator*(const TypeProd1 &prod1, const TypeProd2 &prod2) { //Matrix<TypeR> product_matrix; TypeResult product_matrix; // Check multiplication sizes if(prod1.cols() != prod2.rows()) throw; // Resize product_matrix.resize(prod1.rows(), prod2.cols()); // Multiplication for(int i=0; i<prod1.rows(); i++) for(int j=0; j<prod2.cols(); j++) { // First pass -> checks and dimension of the block int row_block=-1; int col_block=-1; for(int k=0; k<prod1.cols(); k++) { // Check prod1 if(prod1(i,k).cols() == 0 || prod1(k,j).rows() == 0) continue; // Check prod2 if(prod2(i,k).cols() == 0 || prod2(k,j).rows() == 0) continue; // Check prod1 * prod2 if(prod1(i,k).cols() != prod2(k,j).rows()) throw; // Rows and cols if(row_block<0) row_block=prod1(k,j).rows(); if(col_block<0) col_block=prod2(k,j).cols(); // Check if(row_block != prod1(k,j).rows()) throw; if(col_block != prod2(k,j).cols()) throw; } // Dimensions if(row_block < 0 || col_block < 0) continue; // Resize and set zero product_matrix(i,j).resize(row_block, col_block); product_matrix(i,j).setZero(); // Second pass -> Multiplication for(int k=0; k<prod1.cols(); k++) { if(prod1(i,k).cols() == row_block && prod2(k,j).rows() == col_block) product_matrix(i,j)+= prod1(i,k)*prod2(k,j); } } // End return product_matrix; } // Template specialization template<> MatrixSparse operator*(const MatrixSparse &prod1, const MatrixSparse &prod2); */ 
Eigen::SparseMatrix<double> convertToEigenSparse(const MatrixSparse &in); // TODO //Eigen::MatrixXd convertToEigenDense(MatrixSparse& in); Eigen::MatrixXd convertToEigenDense(const MatrixSparse &in); Eigen::MatrixXd convertToEigenDense(const MatrixDense &in); std::vector< Eigen::Triplet<double> > getVectorEigenTripletFromEigenDense(const Eigen::MatrixXd& in, int row_offset=0, int col_offset=0); int insertVectorEigenTripletFromEigenDense(std::vector< Eigen::Triplet<double> >& triplet, const Eigen::MatrixXd& in, int row_offset=0, int col_offset=0); } #endif
SE3P_direct_fd.c
#include "math.h"
#include "mex.h"

/* MEX gateway argument aliases */
#define IDX prhs[0]
#define X prhs[1] // Source locations
#define Q prhs[2] // Source strengths
#define OPT prhs[3] // Parameters
#define PHI plhs[0] // Output

#ifndef VERBOSE
#define VERBOSE 0
#endif

#define PI 3.141592653589793

/* Ewald summation parameters unpacked from the MATLAB options struct. */
typedef struct
{
    double box[3];  /* periodic box dimensions */
    double xi;      /* Ewald splitting parameter */
    int layers;     /* truncation: number of k-space image layers (-1 if absent) */
    double rc;      /* real-space cutoff (-1 if absent) */
} ewald_opts;

/* Read fields from the MATLAB struct OPT into opt. */
void unpack_opt(ewald_opts* opt, const mxArray* mx_opt)
{
    // mandatory options -- will trigger core dump if missing
    opt->xi = mxGetScalar(mxGetField(mx_opt,0,"xi"));
    double* box = mxGetPr(mxGetField(mx_opt,0,"box"));
    opt->box[0] = box[0];
    opt->box[1] = box[1];
    opt->box[2] = box[2];
    // layers: mandatory for ewald sums that are truncated
    const mxArray* mx_layers = mxGetField(mx_opt,0,"layers");
    if(mx_layers)
        opt->layers = (int)mxGetScalar(mx_layers);
    else
        opt->layers = -1;
    // rc: mandatory for short-range real sum
    const mxArray* mx_rc = mxGetField(mx_opt,0,"real_cutoff");
    if(mx_rc)
        opt->rc = mxGetScalar(mx_rc);
    else
        opt->rc = -1;
}

// MATLAB (one-based, doubles) to C (zero-based, integers) index translation
void index_translation(int* idx, const double* idx_d, int N)
{
    for(int i=0; i<N; i++)
        idx[i] = (int)idx_d[i] - 1;
}

/* no input checking is done */
/* Gateway: evaluates the truncated Fourier-space Ewald sum at the points
 * selected by IDX, over all N sources in X with strengths Q.  Output PHI
 * is a num_eval-by-1 vector of potentials. */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] )
{
    // input dims
    const int N = mxGetM(X);
    const int num_eval = mxGetN(IDX); // FIXME: indices assumed to be row vec
    const double* idx_d = mxGetPr(IDX);
    int* idx = mxMalloc(num_eval*sizeof(int));
    index_translation(idx, idx_d, num_eval);
    const double* x = mxGetPr(X);
    const double* q = mxGetPr(Q);

    PHI = mxCreateDoubleMatrix(num_eval, 1, mxREAL);  /* zero-initialized */
    double* restrict phi = mxGetPr(PHI);

    ewald_opts opt;
    unpack_opt(&opt, OPT);
    if(VERBOSE)
    {
        mexPrintf("[EWALD (%s)] MEX N=(%d,%d) ","FS3P",N,num_eval);
        mexPrintf("xi = %.2f [rc = %.2f, layers=%d]\n", opt.xi,opt.rc,opt.layers);
    }

    // call kernel
    double k[3];
    double k2, z, p;
    /* 4*pi/V prefactor and 2*pi/L wavenumber spacing per dimension */
    double c = 4*PI/(opt.box[0]*opt.box[1]*opt.box[2]);
    double fac[3] = {2.*PI/opt.box[0], 2.*PI/opt.box[1], 2.*PI/opt.box[2]};
    double a = 1.0/(4*opt.xi*opt.xi);  /* Gaussian damping exponent 1/(4 xi^2) */
#ifdef _OPENMP
    /* k, k2, z, p are written per-iteration, hence private */
#pragma omp parallel for private(k,k2,z,p)
#endif
    for(int m=0; m<num_eval; m++)
    {
        p = 0;
        /* coordinates of evaluation point m (X stored column-major: x|y|z) */
        double xm[3] = {x[idx[m]],x[idx[m]+N],x[idx[m]+2*N]};
        /* triple loop over k-space images, skipping k = 0 */
        for(int j0 = -opt.layers; j0<=opt.layers; j0++)
            for(int j1 = -opt.layers; j1<=opt.layers; j1++)
                for(int j2 = -opt.layers; j2<=opt.layers; j2++)
                {
                    if(j0 == 0 && j1 == 0 && j2==0)
                        continue;
                    k[0] = fac[0]*j0;
                    k[1] = fac[1]*j1;
                    k[2] = fac[2]*j2;
                    k2 = k[0]*k[0] + k[1]*k[1] + k[2]*k[2];
                    /* structure factor contribution: sum_n q_n cos(k.(xm-xn)) */
                    z=0;
                    for(int n = 0; n<N; n++)
                    {
                        double tmp = -(k[0]*(xm[0]-x[n] )+
                                       k[1]*(xm[1]-x[n+N] )+
                                       k[2]*(xm[2]-x[n+2*N]) );
                        z += q[n]*cos(tmp);
                    }
                    p += z*exp(-a*k2)/k2;
                }
        phi[m] += c*p;
    }
    mxFree(idx);
}
GB_unop__minv_int64_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__minv_int64_int64)
// op(A') function: GB (_unop_tran__minv_int64_int64)

// C type: int64_t
// A type: int64_t
// cast: int64_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = aij ;               \
    Cx [pC] = GB_IMINV_SIGNED (z, 64) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the signed integer multiplicative-inverse operator entrywise;
// Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__minv_int64_int64)
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int64_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 64) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int64_t z = aij ;
            Cx [p] = GB_IMINV_SIGNED (z, 64) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose work is done by the GB_unop_transpose.c template, driven by
// the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__minv_int64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
yada.c
/* ============================================================================= * * yada.c * * ============================================================================= * * Copyright (C) Stanford University, 2006. All Rights Reserved. * Author: Chi Cao Minh * * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * =============================================================================
 */


#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "region.h"
#include "list.h"
#include "mesh.h"
#include "heap.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"


#define PARAM_DEFAULT_INPUTPREFIX ("")
#define PARAM_DEFAULT_NUMTHREAD (1L)
#define PARAM_DEFAULT_ANGLE (20.0)


/* Command-line configurable globals */
const char* global_inputPrefix = PARAM_DEFAULT_INPUTPREFIX;
long global_numThread = PARAM_DEFAULT_NUMTHREAD;
double global_angleConstraint = PARAM_DEFAULT_ANGLE;
/* Shared benchmark state */
mesh_t* global_meshPtr;
heap_t* global_workHeapPtr;
/* Heap-allocated counters shared across worker threads (updated via TM) */
long* global_totalNumAdded;
long* global_numProcess = 0;


/* =============================================================================
 * displayUsage
 * =============================================================================
 */
static void
displayUsage (const char* appName)
{
    printf("Usage: %s [options]\n", appName);
    puts("\nOptions:                            (defaults)\n");
    printf("    a <FLT>   Min [a]ngle constraint  (%lf)\n", PARAM_DEFAULT_ANGLE);
    printf("    i <STR>   [i]nput name prefix     (%s)\n",  PARAM_DEFAULT_INPUTPREFIX);
    printf("    t <UINT>  Number of [t]hreads     (%li)\n", PARAM_DEFAULT_NUMTHREAD);
    exit(1);
}


/* =============================================================================
 * parseArgs
 * =============================================================================
 */
static void
parseArgs (long argc, char* const argv[])
{
    long i;
    long opt;

    opterr = 0;

    while ((opt = getopt(argc, argv, "a:i:t:")) != -1) {
        switch (opt) {
            case 'a':
                global_angleConstraint = atof(optarg);
                break;
            case 'i':
                global_inputPrefix = optarg;
                break;
            case 't':
                global_numThread = atol(optarg);
                break;
            case '?':
            default:
                opterr++;
                break;
        }
    }

    /* any remaining non-option argument is an error */
    for (i = optind; i < argc; i++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[i]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}


/* =============================================================================
 * initializeWork
 * =============================================================================
 * Shuffle the mesh's bad elements (deterministic seed 0) and push them all
 * onto the work heap.  Returns the number of bad elements found.
 */
static long
initializeWork (heap_t* workHeapPtr, mesh_t* meshPtr)
{
    random_t* randomPtr = random_alloc();
    random_seed(randomPtr, 0);
    mesh_shuffleBad(meshPtr, randomPtr);
    random_free(randomPtr);

    long numBad = 0;

    while (1) {
        element_t* elementPtr = mesh_getBad(meshPtr);
        if (!elementPtr) {
            break;
        }
        numBad++;
        bool_t status = heap_insert(workHeapPtr, (void*)elementPtr);
        assert(status);
        element_setIsReferenced(elementPtr, TRUE);
    }

    return numBad;
}


/* =============================================================================
 * process
 * =============================================================================
 * Worker thread body: repeatedly pop a bad element from the shared work heap,
 * refine the surrounding region transactionally, and push newly created bad
 * elements back.  Per-thread totals are folded into the shared counters in a
 * final transaction.
 */
void
process ()
{
    TM_THREAD_ENTER();

    heap_t* workHeapPtr = global_workHeapPtr;
    mesh_t* meshPtr = global_meshPtr;
    region_t* regionPtr;
    long totalNumAdded = 0;
    long numProcess = 0;

    regionPtr = PREGION_ALLOC();
    assert(regionPtr);

    while (1) {

        element_t* elementPtr;

        /* pop the next bad element; NULL means the heap is empty -> done */
        TM_BEGIN();
        elementPtr = (element_t*)TMHEAP_REMOVE(workHeapPtr);
        TM_END();
        if (elementPtr == NULL) {
            break;
        }

        bool_t isGarbage;
        TM_BEGIN();
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
            continue;
        }

        long numAdded;
        TM_BEGIN();
        PREGION_CLEARBAD(regionPtr);
        numAdded = TMREGION_REFINE(regionPtr, elementPtr, meshPtr);
        TM_END();

        TM_BEGIN();
        TMELEMENT_SETISREFERENCED(elementPtr, FALSE);
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
        }

        totalNumAdded += numAdded;

        /* push any new bad elements created by the refinement */
        TM_BEGIN();
        TMREGION_TRANSFERBAD(regionPtr, workHeapPtr);
        TM_END();

        numProcess++;

    }

    // printf("yada\n");
    /* fold per-thread totals into the shared counters */
    TM_BEGIN();
    TM_SHARED_WRITE_L(*global_totalNumAdded,
                      TM_SHARED_READ_L(*global_totalNumAdded) + totalNumAdded);
    TM_SHARED_WRITE_L(*global_numProcess,
                      TM_SHARED_READ_L(*global_numProcess) + numProcess);
    TM_END();
    // printf("yadaEnd\n");

    PREGION_FREE(regionPtr);

    TM_THREAD_EXIT();
}

/* Heap comparator pairing the sequential and transactional compare functions.
 * NOTE(review): constructor-style initialization — comparator_t is presumably
 * a struct with a matching constructor/macro; confirm in heap.h. */
comparator_t yada_heapcompare(&element_heapCompare, &TMelement_heapCompare);

/* =============================================================================
 * main
 * =============================================================================
 */
MAIN(argc, argv)
{
    /*
     * Initialization
     */
    global_numProcess = (long*)hcmalloc(sizeof(long));
    global_totalNumAdded = (long*)hcmalloc(sizeof(long));

    parseArgs(argc, (char** const)argv);
    SIM_GET_NUM_CPU(global_numThread);
    TM_STARTUP(global_numThread);
    P_MEMORY_STARTUP(global_numThread);
    thread_startup(global_numThread);
    global_meshPtr = mesh_alloc();
    assert(global_meshPtr);
    printf("Angle constraint = %lf\n", global_angleConstraint);
    printf("Reading input... ");
    long initNumElement = mesh_read(global_meshPtr, (char*)global_inputPrefix);
    puts("done.");
    global_workHeapPtr = heap_alloc(1, &yada_heapcompare);
    assert(global_workHeapPtr);
    long initNumBadElement = initializeWork(global_workHeapPtr, global_meshPtr);

    printf("Initial number of mesh elements = %li\n", initNumElement);
    printf("Initial number of bad elements = %li\n", initNumBadElement);
    printf("Starting triangulation...");
    fflush(stdout);

    /*
     * Run benchmark
     */

    // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
    //     wallclock time, we want to be sure we read time inside the
    //     simulator, or else we report native cycles spent on the benchmark
    //     instead of simulator cycles.
    GOTO_SIM();
    TIMER_T start;
    TIMER_READ(start);
#ifdef OTM
#pragma omp parallel
    {
        process();
    }
#else
    thread_start((void(*)(void*))process, NULL);
#endif
    TIMER_T stop;
    TIMER_READ(stop);
    // NB: As above, timer reads must be done inside of the simulated region
    //     for PTLSim/ASF
    GOTO_REAL();

    puts(" done.");
    printf("Elapsed time                    = %0.3lf\n",
           TIMER_DIFF_SECONDS(start, stop));
    fflush(stdout);

    /*
     * Check solution
     */

    long finalNumElement = initNumElement + *global_totalNumAdded;
    printf("Final mesh size                 = %li\n", finalNumElement);
    printf("Number of elements processed    = %li\n", *global_numProcess);
    fflush(stdout);

#if 1
    bool_t isSuccess = mesh_check(global_meshPtr, finalNumElement);
#else
    bool_t isSuccess = TRUE;
#endif
    printf("Final mesh is %s\n", (isSuccess ? "valid." : "INVALID!"));
    fflush(stdout);
    //assert(isSuccess);

    /*
     * TODO: deallocate mesh and work heap
     */

    TM_SHUTDOWN();
    P_MEMORY_SHUTDOWN();

    GOTO_SIM();
    thread_shutdown();
    MAIN_RETURN(0);
}


/* =============================================================================
 *
 * End of ruppert.c
 *
 * =============================================================================
 */
trmv_x_csc_u_hi_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <memory.h>

/* y := alpha * op(A) * x + beta * y for the strict upper triangle of A
 * (unit diagonal), CSC storage, transposed access: column i of A contributes
 * the dot product of its entries with row index < i against x, plus x[i]
 * for the implicit unit diagonal.  Processes columns [lrs, lre).
 *
 * The inner loop is unrolled by 4 into separate accumulators.
 * NOTE(review): the unrolled branch ladder assumes row indices within each
 * column are sorted ascending (so row_ind3 < i implies all four are) —
 * confirm this invariant of ALPHA_SPMAT_CSC. */
static alphasparse_status_t trmv_csc_u_hi_trans_unroll4(const ALPHA_Number alpha,
                                                        const ALPHA_SPMAT_CSC* A,
                                                        const ALPHA_Number* x,
                                                        const ALPHA_Number beta,
                                                        ALPHA_Number* y,
                                                        ALPHA_INT lrs,
                                                        ALPHA_INT lre)
{
    ALPHA_INT m = A->cols;  /* NOTE(review): unused */
    for (ALPHA_INT i = lrs; i < lre; i++)
    {
        /* four partial accumulators for the unrolled dot product */
        register ALPHA_Number tmp0;
        register ALPHA_Number tmp1;
        register ALPHA_Number tmp2;
        register ALPHA_Number tmp3;
        alpha_setzero(tmp0);
        alpha_setzero(tmp1);
        alpha_setzero(tmp2);
        alpha_setzero(tmp3);
        ALPHA_INT pks = A->cols_start[i];
        ALPHA_INT pke = A->cols_end[i];
        ALPHA_INT pkl = pke - pks;
        ALPHA_INT pkl4 = pkl - 4;
        ALPHA_INT row_ind0, row_ind1, row_ind2, row_ind3;
        ALPHA_Number *A_val = &A->values[pks];
        ALPHA_INT *A_row = &A->row_indx[pks];
        ALPHA_INT pi;
        /* unrolled-by-4 pass; only entries strictly above the diagonal
         * (row index < i) participate */
        for (pi = 0; pi < pkl4; pi += 4)
        {
            row_ind0 = A_row[pi];
            row_ind1 = A_row[pi + 1];
            row_ind2 = A_row[pi + 2];
            row_ind3 = A_row[pi + 3];
            if (row_ind3 < i){
                /* all four entries are above the diagonal */
                alpha_madde(tmp0, A_val[pi], x[row_ind0]);
                alpha_madde(tmp1, A_val[pi+1], x[row_ind1]);
                alpha_madde(tmp2, A_val[pi+2], x[row_ind2]);
                alpha_madde(tmp3, A_val[pi+3], x[row_ind3]);
            }else if (row_ind2 < i){
                /* first three only (accumulator choice is arbitrary;
                 * all four are summed together below) */
                alpha_madde(tmp1, A_val[pi], x[row_ind0]);
                alpha_madde(tmp2, A_val[pi+1], x[row_ind1]);
                alpha_madde(tmp3, A_val[pi+2], x[row_ind2]);
            }else if (row_ind1 < i){
                alpha_madde(tmp2, A_val[pi], x[row_ind0]);
                alpha_madde(tmp3, A_val[pi+1], x[row_ind1]);
            }else if (row_ind0 < i){
                alpha_madde(tmp3, A_val[pi], x[row_ind0]);
            }
        }
        /* scalar cleanup of the remaining <4 entries */
        for (; pi < pkl; pi += 1)
        {
            if (A_row[pi] < i)
            {
                alpha_madde(tmp0, A_val[pi], x[A_row[pi]]);
            }
        }
        /* reduce the four accumulators, add unit-diagonal term x[i],
         * then y[i] = alpha*dot + beta*y[i] */
        alpha_add(tmp0, tmp0, tmp1);
        alpha_add(tmp2, tmp2, tmp3);
        alpha_add(tmp0, tmp0, tmp2);
        alpha_add(tmp0, tmp0, x[i]);
        alpha_mul(tmp0, tmp0, alpha);
        alpha_mul(tmp1, beta, y[i]);
        alpha_add(y[i], tmp0, tmp1);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Parallel driver: partition the columns by nonzero count and run the
 * unrolled kernel on each thread's contiguous column range.  Ranges are
 * disjoint, so the writes to y do not race. */
static alphasparse_status_t trmv_csc_u_hi_trans_omp(const ALPHA_Number alpha,
                                                    const ALPHA_SPMAT_CSC* A,
                                                    const ALPHA_Number* x,
                                                    const ALPHA_Number beta,
                                                    ALPHA_Number* y)
{
    ALPHA_INT n = A->cols;
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* VLA: one partition boundary per thread plus the end sentinel */
    ALPHA_INT partition[num_threads + 1];
    balanced_partition_row_by_nnz(A->cols_end, n, num_threads, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT local_n_s = partition[tid];
        ALPHA_INT local_n_e = partition[tid + 1];
        trmv_csc_u_hi_trans_unroll4(alpha,A,x,beta,y,local_n_s,local_n_e);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (name supplied by the ONAME build macro). */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return trmv_csc_u_hi_trans_omp(alpha, A, x, beta, y);
}
QLA_D3_r1_veq_norm2_V.c
/**************** QLA_D3_r_veq_norm2_V.c ********************/ #include <stdio.h> #include <qla_config.h> #include <qla_types.h> #include <qla_random.h> #include <qla_cmath.h> #include <qla_d3.h> #include <math.h> static void start_slice(){ __asm__ __volatile__ (""); } static void end_slice(){ __asm__ __volatile__ (""); } void QLA_D3_r_veq_norm2_V ( QLA_D_Real *restrict r, QLA_D3_ColorVector *restrict a, int n) { start_slice(); #ifdef HAVE_XLC #pragma disjoint(*r,*a) __alignx(16,r); __alignx(16,a); #endif QLA_D_Real sum; sum = 0.; #pragma omp parallel for reduction(+:sum) for(int i=0; i<n; i++) { for(int i_c=0; i_c<3; i_c++) { QLA_D_Complex at; QLA_c_eq_c(at,QLA_D3_elem_V(a[i],i_c)); sum += QLA_norm2_c(at); } } *r = sum; end_slice(); }
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer-argument ceiling/floor division helpers used by the generated
 * tiled loop bounds below. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: y is modified in place by the carry normalization below. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative.
   */
  return x->tv_sec < y->tv_sec;
}

/*
 * Benchmark driver: allocates a double-buffered 3D grid A[2][Nz][Ny][Nx] and
 * seven coefficient grids, runs TESTS repetitions of a time-tiled
 * (PLUTO/CLooG-generated) 7-point variable-coefficient stencil sweep, and
 * reports the best wall-clock time.
 *
 * Usage (from the argv parsing): prog Nx Ny Nz [Nt]; each spatial extent is
 * padded by 2 for boundary layers.
 * NOTE(review): if argc <= 3 then Nx/Ny/Nz, and if argc <= 4 then Nt, are
 * used uninitialized below — the benchmark relies on being invoked with all
 * four arguments; verify against the harness.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A[buffer 0/1][z][y][x] — pointer-of-pointer layout, one malloc per row.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[c][z][y][x]: one coefficient grid per stencil point (center + 6 neighbors).
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // (the sizes 32x32x24x32 are baked into the generated loop bounds below;
  //  this array itself is informational only and is never freed — minor leak).
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 32;
  tile_size[1] = 32;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  // NOTE(review): initialization starts at index 1, so the plane/row/column
  // at index 0 is left uninitialized even though the stencil reads it as a
  // boundary neighbor — inherited from the original test bench; confirm.
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

    /* The following license text was carried along from the preprocessed
     * glibc <stdc-predef.h> by the source-to-source tool: */
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
    /* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
    /* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
    /* We do not support C11 <threads.h>.  */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    /* Machine-generated diamond/skewed time-tiled loop nest (tile sizes
     * 32x32x24x32).  t1 iterates over time tiles, t2..t4 over skewed space
     * tiles (t2 parallelized across threads), t5 is the time step, and
     * t6..t8 are the intra-tile z/y/x point loops.  Do not hand-edit the
     * bound expressions. */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,16);t1++) {
        lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
        ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) {
            for (t4=max(max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32)),ceild(24*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(16*t1+Nx+29,32)),floord(32*t2+Nx+28,32)),floord(24*t3+Nx+20,32)),floord(32*t1-32*t2+Nz+Nx+27,32));t4++) {
              for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),32*t4+30),32*t1-32*t2+Nz+29);t5++) {
                for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                    lbv=max(32*t4,t5+1);
                    ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point update: new = c0*center + c1..c6 * the six
                       * face neighbors; (-t5+t{6,7,8}) undoes the time skew
                       * to recover the physical (z,y,x) index, and
                       * (t5+1)%2 / t5%2 select the two time buffers. */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level pointers A, coef, and tile_size are never
  // freed (harmless at process exit, but flagged for completeness).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
GB_unaryop__ainv_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_fp64 // op(A') function: GB_tran__ainv_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_fp64 ( bool *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
convolution_1x1_pack4to1_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 convolution, pack4 (bf16) input -> pack1 output.
// A 1x1 s1 convolution is a plain matrix multiply over all w*h positions, so
// the spatial dims are flattened into a single row and the work is handed to
// the shared im2col/SGEMM kernel.
static void conv1x1s1_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    // Reshape view: same data, w*h columns in one row (no copy).
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_pack4to1_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}

// 1x1 stride-2 convolution, pack4 (bf16) input -> pack1 output.
// Stride 2 is reduced to stride 1 by first gathering every second input
// pixel (in both x and y) into a shrunken outw x outh blob, then running the
// stride-1 kernel above on it.
static void conv1x1s2_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;

    // After reading outw pixels at stride 2 (2*outw input pixels), skip the
    // rest of the current row plus the whole next row (vertical stride 2);
    // the *4 accounts for the 4 packed lanes per pixel.
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Copy 4 output pixels per iteration: source pixels are 8 shorts
            // apart (stride 2 x pack4), gathered with 4-lane loads and
            // stored contiguously as two 8-lane vectors.
            for (; j + 3 < outw; j += 4)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0 + 8);
                uint16x4_t _v2 = vld1_u16(r0 + 16);
                uint16x4_t _v3 = vld1_u16(r0 + 24);
                uint16x8_t _v01 = vcombine_u16(_v0, _v1);
                uint16x8_t _v23 = vcombine_u16(_v2, _v3);
                vst1q_u16(outptr, _v01);
                vst1q_u16(outptr + 8, _v23);
                r0 += 32;
                outptr += 16;
            }
            // 2-pixel tail.
            for (; j + 1 < outw; j += 2)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0 + 8);
                uint16x8_t _v = vcombine_u16(_v0, _v1);
                vst1q_u16(outptr, _v);
                r0 += 16;
                outptr += 8;
            }
            // 1-pixel tail.
            for (; j < outw; j++)
            {
                uint16x4_t _v = vld1_u16(r0);
                vst1_u16(outptr, _v);
                r0 += 8;
                outptr += 4;
            }
            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack4to1_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
acado_integrator.c
/* * This file was auto-generated using the ACADO Toolkit. * * While ACADO Toolkit is free software released under the terms of * the GNU Lesser General Public License (LGPL), the generated code * as such remains the property of the user who used ACADO Toolkit * to generate this code. In particular, user dependent data of the code * do not inherit the GNU LGPL license. On the other hand, parts of the * generated code that are a direct copy of source code from the * ACADO Toolkit or the software tools it is based on, remain, as derived * work, automatically covered by the LGPL license. * * ACADO Toolkit is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include "acado_common.h" real_t rk_dim9_swap; /** Column vector of size: 9 */ real_t rk_dim9_bPerm[ 9 ]; /** Column vector of size: 62 */ real_t auxVar[ 62 ]; real_t rk_ttt; /** Row vector of size: 21 */ real_t rk_xxx[ 21 ]; /** Column vector of size: 9 */ real_t rk_kkk[ 9 ]; /** Matrix of size: 9 x 9 (row major format) */ real_t rk_A[ 81 ]; /** Column vector of size: 9 */ real_t rk_b[ 9 ]; /** Row vector of size: 9 */ int rk_dim9_perm[ 9 ]; /** Column vector of size: 9 */ real_t rk_rhsTemp[ 9 ]; /** Row vector of size: 108 */ real_t rk_diffsTemp2[ 108 ]; /** Column vector of size: 9 */ real_t rk_diffK[ 9 ]; /** Matrix of size: 9 x 12 (row major format) */ real_t rk_diffsNew2[ 108 ]; #pragma omp threadprivate( auxVar, rk_ttt, rk_xxx, rk_kkk, rk_diffK, rk_rhsTemp, rk_dim9_perm, rk_A, rk_b, rk_diffsNew2, rk_diffsTemp2, rk_dim9_swap, rk_dim9_bPerm ) void acado_rhs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 9; const real_t* od = in + 12; /* Vector of auxiliary variables; number of elements: 29. 
*/ real_t* a = auxVar; /* Compute intermediate quantities: */ a[0] = (sin(xd[4])); a[1] = (cos(xd[3])); a[2] = (cos(xd[5])); a[3] = (sin(xd[4])); a[4] = (sin(xd[3])); a[5] = (sin(xd[5])); a[6] = (cos(xd[4])); a[7] = (cos(xd[5])); a[8] = (cos(xd[4])); a[9] = (sin(xd[5])); a[10] = (cos(xd[3])); a[11] = (sin(xd[5])); a[12] = (cos(xd[5])); a[13] = (sin(xd[4])); a[14] = (sin(xd[3])); a[15] = (cos(xd[3])); a[16] = (cos(xd[5])); a[17] = (sin(xd[4])); a[18] = (sin(xd[3])); a[19] = (sin(xd[5])); a[20] = (cos(xd[3])); a[21] = (sin(xd[4])); a[22] = (sin(xd[5])); a[23] = (cos(xd[5])); a[24] = (sin(xd[3])); a[25] = (cos(xd[4])); a[26] = (sin(xd[3])); a[27] = (cos(xd[4])); a[28] = (cos(xd[3])); /* Compute outputs: */ out[0] = (((((((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[0]))*od[4])*u[2])*xd[2])+((((a[1]*a[2])*a[3])+(a[4]*a[5]))*u[2]))-((((a[6]*a[7])*od[4])*u[2])*xd[0]))-((((a[8]*od[4])*a[9])*u[2])*xd[1]))+od[6]); out[1] = (((((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[10])*a[11])+((a[12]*a[13])*a[14])))*od[5])*u[2])*xd[0])-(((((a[15]*a[16])+((a[17]*a[18])*a[19]))*od[5])*u[2])*xd[1]))+((((a[20]*a[21])*a[22])-(a[23]*a[24]))*u[2]))-((((a[25]*od[5])*a[26])*u[2])*xd[2]))+od[7]); out[2] = (((real_t)(-9.8065999999999995e+00)+((a[27]*a[28])*u[2]))+od[8]); out[3] = ((((real_t)(0.0000000000000000e+00)-xd[3])+(od[1]*u[0]))/od[0]); out[4] = ((((real_t)(0.0000000000000000e+00)-xd[4])+(od[3]*u[1]))/od[2]); out[5] = (real_t)(0.0000000000000000e+00); out[6] = xd[0]; out[7] = xd[1]; out[8] = xd[2]; } void acado_diffs(const real_t* in, real_t* out) { const real_t* xd = in; const real_t* u = in + 9; const real_t* od = in + 12; /* Vector of auxiliary variables; number of elements: 62. 
*/ real_t* a = auxVar; /* Compute intermediate quantities: */ a[0] = (cos(xd[4])); a[1] = (cos(xd[5])); a[2] = (cos(xd[4])); a[3] = (sin(xd[5])); a[4] = (sin(xd[4])); a[5] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[6] = (cos(xd[5])); a[7] = (sin(xd[4])); a[8] = (cos(xd[3])); a[9] = (sin(xd[5])); a[10] = (cos(xd[4])); a[11] = (cos(xd[3])); a[12] = (cos(xd[4])); a[13] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[14] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[15] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[16] = (sin(xd[3])); a[17] = (cos(xd[5])); a[18] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[19] = (cos(xd[5])); a[20] = (cos(xd[3])); a[21] = (sin(xd[5])); a[22] = (cos(xd[5])); a[23] = (sin(xd[4])); a[24] = (sin(xd[3])); a[25] = (cos(xd[3])); a[26] = (cos(xd[5])); a[27] = (sin(xd[4])); a[28] = (sin(xd[3])); a[29] = (sin(xd[5])); a[30] = (cos(xd[4])); a[31] = (sin(xd[3])); a[32] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[33] = (cos(xd[3])); a[34] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[35] = (cos(xd[3])); a[36] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[37] = (sin(xd[4])); a[38] = (sin(xd[5])); a[39] = (cos(xd[5])); a[40] = (cos(xd[3])); a[41] = (cos(xd[3])); a[42] = (cos(xd[4])); a[43] = (cos(xd[4])); a[44] = (cos(xd[3])); a[45] = (cos(xd[4])); a[46] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[47] = (cos(xd[5])); a[48] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[49] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[50] = (cos(xd[5])); a[51] = (cos(xd[5])); a[52] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5]))); a[53] = (sin(xd[3])); a[54] = (cos(xd[4])); a[55] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3]))); a[56] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4]))); a[57] = (cos(xd[3])); a[58] = ((real_t)(1.0000000000000000e+00)/od[0]); a[59] = ((real_t)(1.0000000000000000e+00)/od[0]); a[60] = ((real_t)(1.0000000000000000e+00)/od[2]); a[61] = 
((real_t)(1.0000000000000000e+00)/od[2]); /* Compute outputs: */ out[0] = ((real_t)(0.0000000000000000e+00)-(((a[0]*a[1])*od[4])*u[2])); out[1] = ((real_t)(0.0000000000000000e+00)-(((a[2]*od[4])*a[3])*u[2])); out[2] = ((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[4]))*od[4])*u[2]); out[3] = ((((a[5]*a[6])*a[7])+(a[8]*a[9]))*u[2]); out[4] = ((((((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[10]))*od[4])*u[2])*xd[2])+(((a[11]*a[6])*a[12])*u[2]))-((((a[13]*a[1])*od[4])*u[2])*xd[0]))-((((a[14]*od[4])*a[3])*u[2])*xd[1])); out[5] = ((((((a[11]*a[15])*a[7])+(a[16]*a[17]))*u[2])-((((a[0]*a[18])*od[4])*u[2])*xd[0]))-((((a[2]*od[4])*a[19])*u[2])*xd[1])); out[6] = (real_t)(0.0000000000000000e+00); out[7] = (real_t)(0.0000000000000000e+00); out[8] = (real_t)(0.0000000000000000e+00); out[9] = (real_t)(0.0000000000000000e+00); out[10] = (real_t)(0.0000000000000000e+00); out[11] = (((((((real_t)(0.0000000000000000e+00)-((real_t)(0.0000000000000000e+00)-a[4]))*od[4])*xd[2])+(((a[11]*a[6])*a[7])+(a[16]*a[9])))-(((a[0]*a[1])*od[4])*xd[0]))-(((a[2]*od[4])*a[3])*xd[1])); out[12] = ((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[20])*a[21])+((a[22]*a[23])*a[24])))*od[5])*u[2]); out[13] = ((real_t)(0.0000000000000000e+00)-((((a[25]*a[26])+((a[27]*a[28])*a[29]))*od[5])*u[2])); out[14] = ((real_t)(0.0000000000000000e+00)-(((a[30]*od[5])*a[31])*u[2])); out[15] = ((((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[32])*a[21])+((a[22]*a[23])*a[33])))*od[5])*u[2])*xd[0])-(((((a[34]*a[26])+((a[27]*a[35])*a[29]))*od[5])*u[2])*xd[1]))+((((a[36]*a[37])*a[38])-(a[39]*a[40]))*u[2]))-((((a[30]*od[5])*a[41])*u[2])*xd[2])); out[16] = ((((((((real_t)(0.0000000000000000e+00)-((a[22]*a[42])*a[24]))*od[5])*u[2])*xd[0])-(((((a[43]*a[28])*a[29])*od[5])*u[2])*xd[1]))+(((a[44]*a[45])*a[38])*u[2]))-((((a[46]*od[5])*a[31])*u[2])*xd[2])); out[17] = 
(((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[20])*a[47])+((a[48]*a[23])*a[24])))*od[5])*u[2])*xd[0])-(((((a[25]*a[49])+((a[27]*a[28])*a[50]))*od[5])*u[2])*xd[1]))+((((a[44]*a[37])*a[51])-(a[52]*a[53]))*u[2])); out[18] = (real_t)(0.0000000000000000e+00); out[19] = (real_t)(0.0000000000000000e+00); out[20] = (real_t)(0.0000000000000000e+00); out[21] = (real_t)(0.0000000000000000e+00); out[22] = (real_t)(0.0000000000000000e+00); out[23] = (((((((real_t)(0.0000000000000000e+00)-((((real_t)(0.0000000000000000e+00)-a[20])*a[21])+((a[22]*a[23])*a[24])))*od[5])*xd[0])-((((a[25]*a[26])+((a[27]*a[28])*a[29]))*od[5])*xd[1]))+(((a[44]*a[37])*a[38])-(a[39]*a[53])))-(((a[30]*od[5])*a[31])*xd[2])); out[24] = (real_t)(0.0000000000000000e+00); out[25] = (real_t)(0.0000000000000000e+00); out[26] = (real_t)(0.0000000000000000e+00); out[27] = ((a[54]*a[55])*u[2]); out[28] = ((a[56]*a[57])*u[2]); out[29] = (real_t)(0.0000000000000000e+00); out[30] = (real_t)(0.0000000000000000e+00); out[31] = (real_t)(0.0000000000000000e+00); out[32] = (real_t)(0.0000000000000000e+00); out[33] = (real_t)(0.0000000000000000e+00); out[34] = (real_t)(0.0000000000000000e+00); out[35] = (a[54]*a[57]); out[36] = (real_t)(0.0000000000000000e+00); out[37] = (real_t)(0.0000000000000000e+00); out[38] = (real_t)(0.0000000000000000e+00); out[39] = (((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*a[58]); out[40] = (real_t)(0.0000000000000000e+00); out[41] = (real_t)(0.0000000000000000e+00); out[42] = (real_t)(0.0000000000000000e+00); out[43] = (real_t)(0.0000000000000000e+00); out[44] = (real_t)(0.0000000000000000e+00); out[45] = (od[1]*a[59]); out[46] = (real_t)(0.0000000000000000e+00); out[47] = (real_t)(0.0000000000000000e+00); out[48] = (real_t)(0.0000000000000000e+00); out[49] = (real_t)(0.0000000000000000e+00); out[50] = (real_t)(0.0000000000000000e+00); out[51] = (real_t)(0.0000000000000000e+00); out[52] = 
(((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*a[60]); out[53] = (real_t)(0.0000000000000000e+00); out[54] = (real_t)(0.0000000000000000e+00); out[55] = (real_t)(0.0000000000000000e+00); out[56] = (real_t)(0.0000000000000000e+00); out[57] = (real_t)(0.0000000000000000e+00); out[58] = (od[3]*a[61]); out[59] = (real_t)(0.0000000000000000e+00); out[60] = (real_t)(0.0000000000000000e+00); out[61] = (real_t)(0.0000000000000000e+00); out[62] = (real_t)(0.0000000000000000e+00); out[63] = (real_t)(0.0000000000000000e+00); out[64] = (real_t)(0.0000000000000000e+00); out[65] = (real_t)(0.0000000000000000e+00); out[66] = (real_t)(0.0000000000000000e+00); out[67] = (real_t)(0.0000000000000000e+00); out[68] = (real_t)(0.0000000000000000e+00); out[69] = (real_t)(0.0000000000000000e+00); out[70] = (real_t)(0.0000000000000000e+00); out[71] = (real_t)(0.0000000000000000e+00); out[72] = (real_t)(1.0000000000000000e+00); out[73] = (real_t)(0.0000000000000000e+00); out[74] = (real_t)(0.0000000000000000e+00); out[75] = (real_t)(0.0000000000000000e+00); out[76] = (real_t)(0.0000000000000000e+00); out[77] = (real_t)(0.0000000000000000e+00); out[78] = (real_t)(0.0000000000000000e+00); out[79] = (real_t)(0.0000000000000000e+00); out[80] = (real_t)(0.0000000000000000e+00); out[81] = (real_t)(0.0000000000000000e+00); out[82] = (real_t)(0.0000000000000000e+00); out[83] = (real_t)(0.0000000000000000e+00); out[84] = (real_t)(0.0000000000000000e+00); out[85] = (real_t)(1.0000000000000000e+00); out[86] = (real_t)(0.0000000000000000e+00); out[87] = (real_t)(0.0000000000000000e+00); out[88] = (real_t)(0.0000000000000000e+00); out[89] = (real_t)(0.0000000000000000e+00); out[90] = (real_t)(0.0000000000000000e+00); out[91] = (real_t)(0.0000000000000000e+00); out[92] = (real_t)(0.0000000000000000e+00); out[93] = (real_t)(0.0000000000000000e+00); out[94] = (real_t)(0.0000000000000000e+00); out[95] = (real_t)(0.0000000000000000e+00); out[96] = (real_t)(0.0000000000000000e+00); 
out[97] = (real_t)(0.0000000000000000e+00); out[98] = (real_t)(1.0000000000000000e+00); out[99] = (real_t)(0.0000000000000000e+00); out[100] = (real_t)(0.0000000000000000e+00); out[101] = (real_t)(0.0000000000000000e+00); out[102] = (real_t)(0.0000000000000000e+00); out[103] = (real_t)(0.0000000000000000e+00); out[104] = (real_t)(0.0000000000000000e+00); out[105] = (real_t)(0.0000000000000000e+00); out[106] = (real_t)(0.0000000000000000e+00); out[107] = (real_t)(0.0000000000000000e+00); } void acado_solve_dim9_triangular( real_t* const A, real_t* const b ) { b[8] = b[8]/A[80]; b[7] -= + A[71]*b[8]; b[7] = b[7]/A[70]; b[6] -= + A[62]*b[8]; b[6] -= + A[61]*b[7]; b[6] = b[6]/A[60]; b[5] -= + A[53]*b[8]; b[5] -= + A[52]*b[7]; b[5] -= + A[51]*b[6]; b[5] = b[5]/A[50]; b[4] -= + A[44]*b[8]; b[4] -= + A[43]*b[7]; b[4] -= + A[42]*b[6]; b[4] -= + A[41]*b[5]; b[4] = b[4]/A[40]; b[3] -= + A[35]*b[8]; b[3] -= + A[34]*b[7]; b[3] -= + A[33]*b[6]; b[3] -= + A[32]*b[5]; b[3] -= + A[31]*b[4]; b[3] = b[3]/A[30]; b[2] -= + A[26]*b[8]; b[2] -= + A[25]*b[7]; b[2] -= + A[24]*b[6]; b[2] -= + A[23]*b[5]; b[2] -= + A[22]*b[4]; b[2] -= + A[21]*b[3]; b[2] = b[2]/A[20]; b[1] -= + A[17]*b[8]; b[1] -= + A[16]*b[7]; b[1] -= + A[15]*b[6]; b[1] -= + A[14]*b[5]; b[1] -= + A[13]*b[4]; b[1] -= + A[12]*b[3]; b[1] -= + A[11]*b[2]; b[1] = b[1]/A[10]; b[0] -= + A[8]*b[8]; b[0] -= + A[7]*b[7]; b[0] -= + A[6]*b[6]; b[0] -= + A[5]*b[5]; b[0] -= + A[4]*b[4]; b[0] -= + A[3]*b[3]; b[0] -= + A[2]*b[2]; b[0] -= + A[1]*b[1]; b[0] = b[0]/A[0]; } real_t acado_solve_dim9_system( real_t* const A, real_t* const b, int* const rk_perm ) { real_t det; int i; int j; int k; int indexMax; int intSwap; real_t valueMax; real_t temp; for (i = 0; i < 9; ++i) { rk_perm[i] = i; } det = 1.0000000000000000e+00; for( i=0; i < (8); i++ ) { indexMax = i; valueMax = fabs(A[i*9+i]); for( j=(i+1); j < 9; j++ ) { temp = fabs(A[j*9+i]); if( temp > valueMax ) { indexMax = j; valueMax = temp; } } if( indexMax > i ) { for (k = 0; k < 9; ++k) { 
rk_dim9_swap = A[i*9+k]; A[i*9+k] = A[indexMax*9+k]; A[indexMax*9+k] = rk_dim9_swap; } rk_dim9_swap = b[i]; b[i] = b[indexMax]; b[indexMax] = rk_dim9_swap; intSwap = rk_perm[i]; rk_perm[i] = rk_perm[indexMax]; rk_perm[indexMax] = intSwap; } det *= A[i*9+i]; for( j=i+1; j < 9; j++ ) { A[j*9+i] = -A[j*9+i]/A[i*9+i]; for( k=i+1; k < 9; k++ ) { A[j*9+k] += A[j*9+i] * A[i*9+k]; } b[j] += A[j*9+i] * b[i]; } } det *= A[80]; det = fabs(det); acado_solve_dim9_triangular( A, b ); return det; } void acado_solve_dim9_system_reuse( real_t* const A, real_t* const b, int* const rk_perm ) { rk_dim9_bPerm[0] = b[rk_perm[0]]; rk_dim9_bPerm[1] = b[rk_perm[1]]; rk_dim9_bPerm[2] = b[rk_perm[2]]; rk_dim9_bPerm[3] = b[rk_perm[3]]; rk_dim9_bPerm[4] = b[rk_perm[4]]; rk_dim9_bPerm[5] = b[rk_perm[5]]; rk_dim9_bPerm[6] = b[rk_perm[6]]; rk_dim9_bPerm[7] = b[rk_perm[7]]; rk_dim9_bPerm[8] = b[rk_perm[8]]; rk_dim9_bPerm[1] += A[9]*rk_dim9_bPerm[0]; rk_dim9_bPerm[2] += A[18]*rk_dim9_bPerm[0]; rk_dim9_bPerm[2] += A[19]*rk_dim9_bPerm[1]; rk_dim9_bPerm[3] += A[27]*rk_dim9_bPerm[0]; rk_dim9_bPerm[3] += A[28]*rk_dim9_bPerm[1]; rk_dim9_bPerm[3] += A[29]*rk_dim9_bPerm[2]; rk_dim9_bPerm[4] += A[36]*rk_dim9_bPerm[0]; rk_dim9_bPerm[4] += A[37]*rk_dim9_bPerm[1]; rk_dim9_bPerm[4] += A[38]*rk_dim9_bPerm[2]; rk_dim9_bPerm[4] += A[39]*rk_dim9_bPerm[3]; rk_dim9_bPerm[5] += A[45]*rk_dim9_bPerm[0]; rk_dim9_bPerm[5] += A[46]*rk_dim9_bPerm[1]; rk_dim9_bPerm[5] += A[47]*rk_dim9_bPerm[2]; rk_dim9_bPerm[5] += A[48]*rk_dim9_bPerm[3]; rk_dim9_bPerm[5] += A[49]*rk_dim9_bPerm[4]; rk_dim9_bPerm[6] += A[54]*rk_dim9_bPerm[0]; rk_dim9_bPerm[6] += A[55]*rk_dim9_bPerm[1]; rk_dim9_bPerm[6] += A[56]*rk_dim9_bPerm[2]; rk_dim9_bPerm[6] += A[57]*rk_dim9_bPerm[3]; rk_dim9_bPerm[6] += A[58]*rk_dim9_bPerm[4]; rk_dim9_bPerm[6] += A[59]*rk_dim9_bPerm[5]; rk_dim9_bPerm[7] += A[63]*rk_dim9_bPerm[0]; rk_dim9_bPerm[7] += A[64]*rk_dim9_bPerm[1]; rk_dim9_bPerm[7] += A[65]*rk_dim9_bPerm[2]; rk_dim9_bPerm[7] += A[66]*rk_dim9_bPerm[3]; 
rk_dim9_bPerm[7] += A[67]*rk_dim9_bPerm[4]; rk_dim9_bPerm[7] += A[68]*rk_dim9_bPerm[5]; rk_dim9_bPerm[7] += A[69]*rk_dim9_bPerm[6]; rk_dim9_bPerm[8] += A[72]*rk_dim9_bPerm[0]; rk_dim9_bPerm[8] += A[73]*rk_dim9_bPerm[1]; rk_dim9_bPerm[8] += A[74]*rk_dim9_bPerm[2]; rk_dim9_bPerm[8] += A[75]*rk_dim9_bPerm[3]; rk_dim9_bPerm[8] += A[76]*rk_dim9_bPerm[4]; rk_dim9_bPerm[8] += A[77]*rk_dim9_bPerm[5]; rk_dim9_bPerm[8] += A[78]*rk_dim9_bPerm[6]; rk_dim9_bPerm[8] += A[79]*rk_dim9_bPerm[7]; acado_solve_dim9_triangular( A, rk_dim9_bPerm ); b[0] = rk_dim9_bPerm[0]; b[1] = rk_dim9_bPerm[1]; b[2] = rk_dim9_bPerm[2]; b[3] = rk_dim9_bPerm[3]; b[4] = rk_dim9_bPerm[4]; b[5] = rk_dim9_bPerm[5]; b[6] = rk_dim9_bPerm[6]; b[7] = rk_dim9_bPerm[7]; b[8] = rk_dim9_bPerm[8]; } /** Column vector of size: 1 */ static const real_t acado_Ah_mat[ 1 ] = { 5.0000000000000003e-02 }; /* Fixed step size:0.1 */ int acado_integrate( real_t* const rk_eta, int resetIntegrator ) { int error; int i; int j; int k; int run; int run1; int tmp_index1; int tmp_index2; real_t det; rk_ttt = 0.0000000000000000e+00; rk_xxx[9] = rk_eta[117]; rk_xxx[10] = rk_eta[118]; rk_xxx[11] = rk_eta[119]; rk_xxx[12] = rk_eta[120]; rk_xxx[13] = rk_eta[121]; rk_xxx[14] = rk_eta[122]; rk_xxx[15] = rk_eta[123]; rk_xxx[16] = rk_eta[124]; rk_xxx[17] = rk_eta[125]; rk_xxx[18] = rk_eta[126]; rk_xxx[19] = rk_eta[127]; rk_xxx[20] = rk_eta[128]; for (run = 0; run < 1; ++run) { if( resetIntegrator ) { for (i = 0; i < 1; ++i) { for (run1 = 0; run1 < 1; ++run1) { for (j = 0; j < 9; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1]; } acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 108 ]) ); for (j = 0; j < 9; ++j) { tmp_index1 = (run1 * 9) + (j); rk_A[tmp_index1 * 9] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12)]; rk_A[tmp_index1 * 9 + 1] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 1)]; rk_A[tmp_index1 * 9 + 2] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 
12 + 2)]; rk_A[tmp_index1 * 9 + 3] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 3)]; rk_A[tmp_index1 * 9 + 4] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 4)]; rk_A[tmp_index1 * 9 + 5] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 5)]; rk_A[tmp_index1 * 9 + 6] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 6)]; rk_A[tmp_index1 * 9 + 7] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 7)]; rk_A[tmp_index1 * 9 + 8] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 8)]; if( 0 == run1 ) rk_A[(tmp_index1 * 9) + (j)] -= 1.0000000000000000e+00; } acado_rhs( rk_xxx, rk_rhsTemp ); rk_b[run1 * 9] = rk_kkk[run1] - rk_rhsTemp[0]; rk_b[run1 * 9 + 1] = rk_kkk[run1 + 1] - rk_rhsTemp[1]; rk_b[run1 * 9 + 2] = rk_kkk[run1 + 2] - rk_rhsTemp[2]; rk_b[run1 * 9 + 3] = rk_kkk[run1 + 3] - rk_rhsTemp[3]; rk_b[run1 * 9 + 4] = rk_kkk[run1 + 4] - rk_rhsTemp[4]; rk_b[run1 * 9 + 5] = rk_kkk[run1 + 5] - rk_rhsTemp[5]; rk_b[run1 * 9 + 6] = rk_kkk[run1 + 6] - rk_rhsTemp[6]; rk_b[run1 * 9 + 7] = rk_kkk[run1 + 7] - rk_rhsTemp[7]; rk_b[run1 * 9 + 8] = rk_kkk[run1 + 8] - rk_rhsTemp[8]; } det = acado_solve_dim9_system( rk_A, rk_b, rk_dim9_perm ); for (j = 0; j < 1; ++j) { rk_kkk[j] += rk_b[j * 9]; rk_kkk[j + 1] += rk_b[j * 9 + 1]; rk_kkk[j + 2] += rk_b[j * 9 + 2]; rk_kkk[j + 3] += rk_b[j * 9 + 3]; rk_kkk[j + 4] += rk_b[j * 9 + 4]; rk_kkk[j + 5] += rk_b[j * 9 + 5]; rk_kkk[j + 6] += rk_b[j * 9 + 6]; rk_kkk[j + 7] += rk_b[j * 9 + 7]; rk_kkk[j + 8] += rk_b[j * 9 + 8]; } } } for (i = 0; i < 2; ++i) { for (run1 = 0; run1 < 1; ++run1) { for (j = 0; j < 9; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1]; } acado_rhs( rk_xxx, rk_rhsTemp ); rk_b[run1 * 9] = rk_kkk[run1] - rk_rhsTemp[0]; rk_b[run1 * 9 + 1] = rk_kkk[run1 + 1] - rk_rhsTemp[1]; rk_b[run1 * 9 + 2] = rk_kkk[run1 + 2] - rk_rhsTemp[2]; rk_b[run1 * 9 + 3] = rk_kkk[run1 + 3] - rk_rhsTemp[3]; rk_b[run1 * 9 + 4] = 
rk_kkk[run1 + 4] - rk_rhsTemp[4]; rk_b[run1 * 9 + 5] = rk_kkk[run1 + 5] - rk_rhsTemp[5]; rk_b[run1 * 9 + 6] = rk_kkk[run1 + 6] - rk_rhsTemp[6]; rk_b[run1 * 9 + 7] = rk_kkk[run1 + 7] - rk_rhsTemp[7]; rk_b[run1 * 9 + 8] = rk_kkk[run1 + 8] - rk_rhsTemp[8]; } acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm ); for (j = 0; j < 1; ++j) { rk_kkk[j] += rk_b[j * 9]; rk_kkk[j + 1] += rk_b[j * 9 + 1]; rk_kkk[j + 2] += rk_b[j * 9 + 2]; rk_kkk[j + 3] += rk_b[j * 9 + 3]; rk_kkk[j + 4] += rk_b[j * 9 + 4]; rk_kkk[j + 5] += rk_b[j * 9 + 5]; rk_kkk[j + 6] += rk_b[j * 9 + 6]; rk_kkk[j + 7] += rk_b[j * 9 + 7]; rk_kkk[j + 8] += rk_b[j * 9 + 8]; } } for (run1 = 0; run1 < 1; ++run1) { for (j = 0; j < 9; ++j) { rk_xxx[j] = rk_eta[j]; tmp_index1 = j; rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1]; } acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 108 ]) ); for (j = 0; j < 9; ++j) { tmp_index1 = (run1 * 9) + (j); rk_A[tmp_index1 * 9] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12)]; rk_A[tmp_index1 * 9 + 1] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 1)]; rk_A[tmp_index1 * 9 + 2] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 2)]; rk_A[tmp_index1 * 9 + 3] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 3)]; rk_A[tmp_index1 * 9 + 4] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 4)]; rk_A[tmp_index1 * 9 + 5] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 5)]; rk_A[tmp_index1 * 9 + 6] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 6)]; rk_A[tmp_index1 * 9 + 7] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 7)]; rk_A[tmp_index1 * 9 + 8] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 8)]; if( 0 == run1 ) rk_A[(tmp_index1 * 9) + (j)] -= 1.0000000000000000e+00; } } for (run1 = 0; run1 < 9; ++run1) { for (i = 0; i < 1; ++i) { rk_b[i * 9] = - rk_diffsTemp2[(i * 108) + (run1)]; rk_b[i * 9 + 1] = - rk_diffsTemp2[(i * 108) + (run1 + 12)]; rk_b[i * 9 + 2] = - 
rk_diffsTemp2[(i * 108) + (run1 + 24)]; rk_b[i * 9 + 3] = - rk_diffsTemp2[(i * 108) + (run1 + 36)]; rk_b[i * 9 + 4] = - rk_diffsTemp2[(i * 108) + (run1 + 48)]; rk_b[i * 9 + 5] = - rk_diffsTemp2[(i * 108) + (run1 + 60)]; rk_b[i * 9 + 6] = - rk_diffsTemp2[(i * 108) + (run1 + 72)]; rk_b[i * 9 + 7] = - rk_diffsTemp2[(i * 108) + (run1 + 84)]; rk_b[i * 9 + 8] = - rk_diffsTemp2[(i * 108) + (run1 + 96)]; } if( 0 == run1 ) { det = acado_solve_dim9_system( rk_A, rk_b, rk_dim9_perm ); } else { acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm ); } for (i = 0; i < 1; ++i) { rk_diffK[i] = rk_b[i * 9]; rk_diffK[i + 1] = rk_b[i * 9 + 1]; rk_diffK[i + 2] = rk_b[i * 9 + 2]; rk_diffK[i + 3] = rk_b[i * 9 + 3]; rk_diffK[i + 4] = rk_b[i * 9 + 4]; rk_diffK[i + 5] = rk_b[i * 9 + 5]; rk_diffK[i + 6] = rk_b[i * 9 + 6]; rk_diffK[i + 7] = rk_b[i * 9 + 7]; rk_diffK[i + 8] = rk_b[i * 9 + 8]; } for (i = 0; i < 9; ++i) { rk_diffsNew2[(i * 12) + (run1)] = (i == run1-0); rk_diffsNew2[(i * 12) + (run1)] += + rk_diffK[i]*(real_t)1.0000000000000001e-01; } } for (run1 = 0; run1 < 3; ++run1) { for (i = 0; i < 1; ++i) { for (j = 0; j < 9; ++j) { tmp_index1 = (i * 9) + (j); tmp_index2 = (run1) + (j * 12); rk_b[tmp_index1] = - rk_diffsTemp2[(i * 108) + (tmp_index2 + 9)]; } } acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm ); for (i = 0; i < 1; ++i) { rk_diffK[i] = rk_b[i * 9]; rk_diffK[i + 1] = rk_b[i * 9 + 1]; rk_diffK[i + 2] = rk_b[i * 9 + 2]; rk_diffK[i + 3] = rk_b[i * 9 + 3]; rk_diffK[i + 4] = rk_b[i * 9 + 4]; rk_diffK[i + 5] = rk_b[i * 9 + 5]; rk_diffK[i + 6] = rk_b[i * 9 + 6]; rk_diffK[i + 7] = rk_b[i * 9 + 7]; rk_diffK[i + 8] = rk_b[i * 9 + 8]; } for (i = 0; i < 9; ++i) { rk_diffsNew2[(i * 12) + (run1 + 9)] = + rk_diffK[i]*(real_t)1.0000000000000001e-01; } } rk_eta[0] += + rk_kkk[0]*(real_t)1.0000000000000001e-01; rk_eta[1] += + rk_kkk[1]*(real_t)1.0000000000000001e-01; rk_eta[2] += + rk_kkk[2]*(real_t)1.0000000000000001e-01; rk_eta[3] += + rk_kkk[3]*(real_t)1.0000000000000001e-01; 
/* Advance the state by one integration step: x += h*K with fixed step
 * h = 0.1 (matches the "Fixed step size:0.1" note at the declaration of
 * acado_Ah_mat above). */
rk_eta[4] += + rk_kkk[4]*(real_t)1.0000000000000001e-01;
rk_eta[5] += + rk_kkk[5]*(real_t)1.0000000000000001e-01;
rk_eta[6] += + rk_kkk[6]*(real_t)1.0000000000000001e-01;
rk_eta[7] += + rk_kkk[7]*(real_t)1.0000000000000001e-01;
rk_eta[8] += + rk_kkk[8]*(real_t)1.0000000000000001e-01;
/* Export the sensitivities from rk_diffsNew2 (9 rows x 12 cols: 9 state
 * columns + 3 input columns per row) into the output vector:
 *   rk_eta[9..89]   <- 9x9 state-sensitivity block (row-major, i*9+j),
 *   rk_eta[90..116] <- 9x3 input-sensitivity block (row-major, i*3+j). */
for (i = 0; i < 9; ++i)
{
for (j = 0; j < 9; ++j)
{
tmp_index2 = (j) + (i * 9);
rk_eta[tmp_index2 + 9] = rk_diffsNew2[(i * 12) + (j)];
}
for (j = 0; j < 3; ++j)
{
tmp_index2 = (j) + (i * 3);
rk_eta[tmp_index2 + 90] = rk_diffsNew2[(i * 12) + (j + 9)];
}
}
resetIntegrator = 0;
rk_ttt += 1.0000000000000000e+00;
}
/* NOTE(review): empty loop emitted by the code generator -- no effect. */
for (i = 0; i < 9; ++i)
{
}
/* Map the determinant returned by acado_solve_dim9_system to an error
 * code: det < 1e-12 -> 2, det < 1e-6 -> 1, otherwise 0 (presumably
 * singular / near-singular Newton system -- thresholds are generated). */
if( det < 1e-12 ) {
error = 2;
} else if( det < 1e-6 ) {
error = 1;
} else {
error = 0;
}
return error;
}
GB_binop__isgt_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isgt_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isgt_int8) // A.*B function (eWiseMult): GB (_AemultB_03__isgt_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_int8) // A*D function (colscale): GB (_AxD__isgt_int8) // D*A function (rowscale): GB (_DxB__isgt_int8) // C+=B function (dense accum): GB (_Cdense_accumB__isgt_int8) // C+=b function (dense accum): GB (_Cdense_accumb__isgt_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_int8) // C=scalar+B GB (_bind1st__isgt_int8) // C=scalar+B' GB (_bind1st_tran__isgt_int8) // C=A+scalar GB (_bind2nd__isgt_int8) // C=A'+scalar GB (_bind2nd_tran__isgt_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT8 || GxB_NO_ISGT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
/* Disabled stub (inside the #if 0 opened above): the dense C += A+B kernel
 * exists only for MIN/MAX/PLUS/... operators, not for ISGT. */
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

/* C = A+B with all three matrices dense; the loop body is supplied by the
 * included template, specialized via the GB_* macros defined above. */
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    /* NOTE(review): unreachable -- the braced block above always returns.
     * Harmless artifact of the code generator. */
    return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isgt_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isgt_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
implicit_blender.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) Blender Foundation
 * All rights reserved.
 */

/** \file
 * \ingroup bph
 */

#include "implicit.h"

#ifdef IMPLICIT_SOLVER_BLENDER

# include "MEM_guardedalloc.h"

# include "DNA_meshdata_types.h"
# include "DNA_object_force_types.h"
# include "DNA_object_types.h"
# include "DNA_scene_types.h"
# include "DNA_texture_types.h"

# include "BLI_math.h"
# include "BLI_utildefines.h"

# include "BKE_cloth.h"
# include "BKE_collision.h"
# include "BKE_effect.h"

# include "BPH_mass_spring.h"

# ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wtype-limits"
# endif

/* Minimum vertex count before the OpenMP code paths below are worth it. */
# ifdef _OPENMP
# define CLOTH_OPENMP_LIMIT 512
# endif

//#define DEBUG_TIME

# ifdef DEBUG_TIME
# include "PIL_time.h"
# endif

/* Identity and zero 3x3 matrices used as initializers throughout. */
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

/* Dead reference block; DO_INLINE is presumably provided by implicit.h.
 * FIX: the directives previously read `# defineDO_INLINE ...` (missing
 * space after `define`), which is not a valid preprocessor directive and
 * would fail to compile if this #if 0 guard were ever removed. */
# if 0
# define C99
# ifdef C99
# define DO_INLINE inline
# else
# define DO_INLINE static
# endif
# endif /* if 0 */

struct Cloth;

//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////

/* DEFINITIONS */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
  float m[3][3];     /* 3x3 matrix */
  unsigned int c, r; /* column and row number */
  /* int pinned; //
is this vertex allowed to move? */
  float n1, n2, n3;    /* three normal vectors for collision constraints */
  unsigned int vcount; /* vertex count */
  unsigned int scount; /* spring count */
} fmatrix3x3;

///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */

/* to = from * scalar (component-wise scale of a float[3]). */
DO_INLINE void mul_fvector_S(float to[3], const float from[3], float scalar)
{
  to[0] = from[0] * scalar;
  to[1] = from[1] * scalar;
  to[2] = from[2] * scalar;
}

/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* to[i][j] = vectorA[i] * vectorB[j] -- each row is vectorB scaled by one
 * component of vectorA. */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
  mul_fvector_S(to[0], vectorB, vectorA[0]);
  mul_fvector_S(to[1], vectorB, vectorA[1]);
  mul_fvector_S(to[2], vectorB, vectorA[2]);
}

/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* to = (vectorA vectorB^T) * aS: the outer product above, post-scaled row
 * by row with aS. */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);

  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}

# if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}

///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
# endif

/* create long vector */
/* Allocates a zero-initialized array of `verts` float[3] entries; pair
 * with del_lfvector(). */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
  // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}

/* delete long vector */
DO_INLINE void del_lfvector(float
(*fLongVector)[3]) { if (fLongVector != NULL) { MEM_freeN(fLongVector); // cloth_aligned_free(&MEMORY_BASE, fLongVector); } } /* copy long vector */ DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts) { memcpy(to, from, verts * sizeof(lfVector)); } /* init long vector with float[3] */ DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { copy_v3_v3(fLongVector[i], vector); } } /* zero long vector with float[3] */ DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts) { memset(to, 0.0f, verts * sizeof(lfVector)); } /* multiply long vector with scalar*/ DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { mul_fvector_S(to[i], fLongVector[i], scalar); } } /* multiply long vector with scalar*/ /* A -= B * float */ DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBMUL(to[i], fLongVector[i], scalar); } } /* dot product for big vector */ DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { long i = 0; float temp = 0.0; // XXX brecht, disabled this for now (first schedule line was already disabled), // due to non-commutative nature of floating point ops this makes the sim give // different results each time you run it! 
// schedule(guided, 2) //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT) for (i = 0; i < (long)verts; i++) { temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]); } return temp; } /* A = B + C --> for big vector */ DO_INLINE void add_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /* A = B + C * float --> for big vector */ DO_INLINE void add_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B * float + C * float --> for big vector */ DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float aS, float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS); } } /* A = B - C * float --> for big vector */ DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], float bS, unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS); } } /* A = B - C --> for big vector */ DO_INLINE void sub_lfvector_lfvector(float (*to)[3], float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts) { unsigned int i = 0; for (i = 0; i < verts; i++) { sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]); } } /////////////////////////// // 3x3 matrix /////////////////////////// # if 0 /* printf 3x3 matrix on console: for debug output */ static void print_fmatrix(float m3[3][3]) { printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]); printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]); printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]); } static void 
print_sparse_matrix(fmatrix3x3 *m) { if (m) { unsigned int i; for (i = 0; i < m[0].vcount + m[0].scount; i++) { printf("%d:\n", i); print_fmatrix(m[i].m); } } } # endif # if 0 static void print_lvector(lfVector *v, int numverts) { int i; for (i = 0; i < numverts; i++) { if (i > 0) { printf("\n"); } printf("%f,\n", v[i][0]); printf("%f,\n", v[i][1]); printf("%f,\n", v[i][2]); } } # endif # if 0 static void print_bfmatrix(fmatrix3x3 *m) { int tot = m[0].vcount + m[0].scount; int size = m[0].vcount * 3; float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix"); int q, i, j; for (q = 0; q < tot; q++) { int k = 3 * m[q].r; int l = 3 * m[q].c; for (j = 0; j < 3; j++) { for (i = 0; i < 3; i++) { // if (t[k + i + (l + j) * size] != 0.0f) { // printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c); // } if (k == l) { t[k + i + (k + j) * size] += m[q].m[i][j]; } else { t[k + i + (l + j) * size] += m[q].m[i][j]; t[l + j + (k + i) * size] += m[q].m[j][i]; } } } } for (j = 0; j < size; j++) { if (j > 0 && j % 3 == 0) { printf("\n"); } for (i = 0; i < size; i++) { if (i > 0 && i % 3 == 0) { printf(" "); } implicit_print_matrix_elem(t[i + j * size]); } printf("\n"); } MEM_freeN(t); } # endif /* copy 3x3 matrix */ DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3]) { // memcpy(to, from, sizeof (float) * 9); copy_v3_v3(to[0], from[0]); copy_v3_v3(to[1], from[1]); copy_v3_v3(to[2], from[2]); } /* copy 3x3 matrix */ DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS) { cp_fmatrix(to, ZERO); to[0][0] = aS; to[1][1] = aS; to[2][2] = aS; } # if 0 /* calculate determinant of 3x3 matrix */ DO_INLINE float det_fmatrix(float m[3][3]) { return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2]; } DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3]) { unsigned int i, j; float d; if ((d = det_fmatrix(from)) == 0) { 
printf("can't build inverse"); exit(0); } for (i = 0; i < 3; i++) { for (j = 0; j < 3; j++) { int i1 = (i + 1) % 3; int i2 = (i + 2) % 3; int j1 = (j + 1) % 3; int j2 = (j + 2) % 3; /** Reverse indexes i&j to take transpose. */ to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d; /** * <pre> * if (i == j) { * to[i][j] = 1.0f / from[i][j]; * } * else { * to[i][j] = 0; * } * </pre> */ } } } # endif /* 3x3 matrix multiplied by a scalar */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar) { mul_fvector_S(matrix[0], matrix[0], scalar); mul_fvector_S(matrix[1], matrix[1], scalar); mul_fvector_S(matrix[2], matrix[2], scalar); } /* a vector multiplied by a 3x3 matrix */ /* STATUS: verified */ DO_INLINE void mul_fvector_fmatrix(float *to, const float *from, float matrix[3][3]) { to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } /* 3x3 matrix multiplied by a vector */ /* STATUS: verified */ DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3]) { to[0] = dot_v3v3(matrix[0], from); to[1] = dot_v3v3(matrix[1], from); to[2] = dot_v3v3(matrix[2], from); } /* 3x3 matrix addition with 3x3 matrix */ DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { add_v3_v3v3(to[0], matrixA[0], matrixB[0]); add_v3_v3v3(to[1], matrixA[1], matrixB[1]); add_v3_v3v3(to[2], matrixA[2], matrixB[2]); } /* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */ DO_INLINE void subadd_fmatrixS_fmatrixS( float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS) { VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS); VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS); VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS); } /* A = B - C (3x3 matrix subtraction with 3x3 
matrix) */ DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3]) { sub_v3_v3v3(to[0], matrixA[0], matrixB[0]); sub_v3_v3v3(to[1], matrixA[1], matrixB[1]); sub_v3_v3v3(to[2], matrixA[2], matrixB[2]); } ///////////////////////////////////////////////////////////////// // special functions ///////////////////////////////////////////////////////////////// /* 3x3 matrix multiplied+added by a vector */ /* STATUS: verified */ DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3]) { to[0] += dot_v3v3(matrix[0], from); to[1] += dot_v3v3(matrix[1], from); to[2] += dot_v3v3(matrix[2], from); } DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], const float from[3]) { to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2]; to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2]; to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2]; } BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3]) { mul_v3_v3fl(r[0], a, b[0]); mul_v3_v3fl(r[1], a, b[1]); mul_v3_v3fl(r[2], a, b[2]); } BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3]) { cross_v3_v3v3(r[0], v, m[0]); cross_v3_v3v3(r[1], v, m[1]); cross_v3_v3v3(r[2], v, m[2]); } BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3]) { r[0][0] = 0.0f; r[1][0] = v[2]; r[2][0] = -v[1]; r[0][1] = -v[2]; r[1][1] = 0.0f; r[2][1] = v[0]; r[0][2] = v[1]; r[1][2] = -v[0]; r[2][2] = 0.0f; } BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f) { r[0][0] += m[0][0] * f; r[0][1] += m[0][1] * f; r[0][2] += m[0][2] * f; r[1][0] += m[1][0] * f; r[1][1] += m[1][1] * f; r[1][2] += m[1][2] * f; r[2][0] += m[2][0] * f; r[2][1] += m[2][1] * f; r[2][2] += m[2][2] * f; } ///////////////////////////////////////////////////////////////// /////////////////////////// // SPARSE SYMMETRIC big matrix with 3x3 matrix 
entries /////////////////////////// /* printf a big matrix on console: for debug output */ # if 0 static void print_bfmatrix(fmatrix3x3 *m3) { unsigned int i = 0; for (i = 0; i < m3[0].vcount + m3[0].scount; i++) { print_fmatrix(m3[i].m); } } # endif BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c) { matrix->r = r; matrix->c = c; } /* create big matrix */ DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs) { // TODO: check if memory allocation was successful */ fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs), "cloth_implicit_alloc_matrix"); int i; temp[0].vcount = verts; temp[0].scount = springs; /* vertex part of the matrix is diagonal blocks */ for (i = 0; i < verts; i++) { init_fmatrix(temp + i, i, i); } return temp; } /* delete big matrix */ DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix) { if (matrix != NULL) { MEM_freeN(matrix); } } /* copy big matrix */ DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from) { // TODO bounds checking memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount)); } /* init big matrix */ // slow in parallel DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i; for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { cp_fmatrix(matrix[i].m, m3); } } /* init the diagonal of big matrix */ // slow in parallel DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3]) { unsigned int i, j; float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}}; for (i = 0; i < matrix[0].vcount; i++) { cp_fmatrix(matrix[i].m, m3); } for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) { cp_fmatrix(matrix[j].m, tmatrix); } } /* SPARSE SYMMETRIC multiply big matrix with long vector*/ /* STATUS: verified */ DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector) { unsigned int vcount = from[0].vcount; lfVector *temp = create_lfvector(vcount); zero_lfvector(to, 
vcount); # pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, 
int numsprings)
{
  /* Allocate the whole solver workspace: per-vertex diagonal blocks plus one
   * off-diagonal block per spring for matrices that carry spring coupling. */
  Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat");

  /* process diagonal elements */
  id->tfm = create_bfmatrix(numverts, 0); /* diagonal-only: per-vertex transforms */
  id->A = create_bfmatrix(numverts, numsprings);
  id->dFdV = create_bfmatrix(numverts, numsprings);
  id->dFdX = create_bfmatrix(numverts, numsprings);
  id->S = create_bfmatrix(numverts, 0); /* diagonal-only: constraint filters */
  id->Pinv = create_bfmatrix(numverts, numsprings);
  id->P = create_bfmatrix(numverts, numsprings);
  id->bigI = create_bfmatrix(numverts, numsprings);  // TODO 0 springs
  id->M = create_bfmatrix(numverts, numsprings);
  id->X = create_lfvector(numverts);
  id->Xnew = create_lfvector(numverts);
  id->V = create_lfvector(numverts);
  id->Vnew = create_lfvector(numverts);
  id->F = create_lfvector(numverts);
  id->B = create_lfvector(numverts);
  id->dV = create_lfvector(numverts);
  id->z = create_lfvector(numverts);

  /* Seed the constant identity matrix. */
  initdiag_bfmatrix(id->bigI, I);

  return id;
}

/* Release every matrix/vector owned by the solver, then the struct itself. */
void BPH_mass_spring_solver_free(Implicit_Data *id)
{
  del_bfmatrix(id->tfm);
  del_bfmatrix(id->A);
  del_bfmatrix(id->dFdV);
  del_bfmatrix(id->dFdX);
  del_bfmatrix(id->S);
  del_bfmatrix(id->P);
  del_bfmatrix(id->Pinv);
  del_bfmatrix(id->bigI);
  del_bfmatrix(id->M);

  del_lfvector(id->X);
  del_lfvector(id->Xnew);
  del_lfvector(id->V);
  del_lfvector(id->Vnew);
  del_lfvector(id->F);
  del_lfvector(id->B);
  del_lfvector(id->dV);
  del_lfvector(id->z);

  MEM_freeN(id);
}

/* ==== Transformation from/to root reference frames ==== */

/* r = tfm[index]^T * v: bring a world-space vector into the vertex root frame. */
BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  copy_v3_v3(r, v);
  mul_transposed_m3_v3(data->tfm[index].m, r);
}

/* r = tfm[index] * v: bring a root-frame vector back to world space. */
BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  mul_v3_m3v3(r, data->tfm[index].m, v);
}

/* r = tfm[index]^T * m: rotate a matrix into the vertex root frame. */
BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
  float trot[3][3];
  copy_m3_m3(trot, data->tfm[index].m);
  transpose_m3(trot);
  mul_m3_m3m3(r, trot, m);
}

/* r = tfm[index] * m: rotate a root-frame matrix back to world space. */
BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float
r[3][3], float m[3][3])
{
  mul_m3_m3m3(r, data->tfm[index].m, m);
}

/* ================================ */

/* Apply the constraint filter: multiply each listed vertex's 3-vector by its
 * 3x3 filter block S[i].m (zero/projection blocks remove constrained DOFs). */
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
{
  unsigned int i = 0;

  for (i = 0; i < S[0].vcount; i++) {
    mul_m3_v3(S[i].m, V[S[i].r]);
  }
}

/* this version of the CG algorithm does not work very well with partial constraints
 * (where S has non-zero elements). */
#  if 0
static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */;
  lfVector *q, *d, *tmp, *r;
  float s, starget, a, s_prev;
  unsigned int numverts = lA[0].vcount;
  q = create_lfvector(numverts);
  d = create_lfvector(numverts);
  tmp = create_lfvector(numverts);
  r = create_lfvector(numverts);

  // zero_lfvector(ldV, CLOTHPARTICLES);
  filter(ldV, S);

  add_lfvector_lfvector(ldV, ldV, z, numverts);

  // r = B - Mul(tmp, A, X);    // just use B if X known to be zero
  cp_lfvector(r, lB, numverts);
  mul_bfmatrix_lfvector(tmp, lA, ldV);
  sub_lfvector_lfvector(r, r, tmp, numverts);

  filter(r, S);

  cp_lfvector(d, r, numverts);

  s = dot_lfvector(r, r, numverts);
  starget = s * sqrtf(conjgrad_epsilon);

  while (s > starget && conjgrad_loopcount < conjgrad_looplimit) {
    // Mul(q, A, d); // q = A*d;
    mul_bfmatrix_lfvector(q, lA, d);

    filter(q, S);

    a = s / dot_lfvector(d, q, numverts);

    // X = X + d*a;
    add_lfvector_lfvectorS(ldV, ldV, d, a, numverts);

    // r = r - q*a;
    sub_lfvector_lfvectorS(r, r, q, a, numverts);

    s_prev = s;
    s = dot_lfvector(r, r, numverts);

    //d = r+d*(s/s_prev);
    add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts);

    filter(d, S);

    conjgrad_loopcount++;
  }
  /* conjgrad_lasterror = s; */ /* UNUSED */

  del_lfvector(q);
  del_lfvector(d);
  del_lfvector(tmp);
  del_lfvector(r);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}
#  endif

/* Conjugate gradient solver for A * dV = B with constraint filtering (Baraff &
 * Witkin style "modified CG"). The initial guess is the constraint target z;
 * residual and search directions are passed through filter() every iteration.
 * Convergence/iteration/error details are written into `result`. */
static int cg_filtered(lfVector *ldV,
                       fmatrix3x3 *lA,
                       lfVector *lB,
                       lfVector *z,
                       fmatrix3x3 *S,
                       ImplicitSolverResult *result)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.01f;

  unsigned int numverts = lA[0].vcount;
  lfVector *fB = create_lfvector(numverts);
  lfVector *AdV = create_lfvector(numverts);
  lfVector *r = create_lfvector(numverts);
  lfVector *c = create_lfvector(numverts);
  lfVector *q = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  float bnorm2, delta_new, delta_old, delta_target, alpha;

  cp_lfvector(ldV, z, numverts);

  /* d0 = filter(B)^T * P * filter(B) */
  cp_lfvector(fB, lB, numverts);
  filter(fB, S);
  bnorm2 = dot_lfvector(fB, fB, numverts);
  delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2;

  /* r = filter(B - A * dV) */
  mul_bfmatrix_lfvector(AdV, lA, ldV);
  sub_lfvector_lfvector(r, lB, AdV, numverts);
  filter(r, S);

  /* c = filter(P^-1 * r) */
  cp_lfvector(c, r, numverts);
  filter(c, S);

  /* delta = r^T * c */
  delta_new = dot_lfvector(r, c, numverts);

#  ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== A ====\n");
  print_bfmatrix(lA);
  printf("==== z ====\n");
  print_lvector(z, numverts);
  printf("==== B ====\n");
  print_lvector(lB, numverts);
  printf("==== S ====\n");
  print_bfmatrix(S);
#  endif

  while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) {
    mul_bfmatrix_lfvector(q, lA, c);
    filter(q, S);

    alpha = delta_new / dot_lfvector(c, q, numverts);

    add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts);

    add_lfvector_lfvectorS(r, r, q, -alpha, numverts);

    /* s = P^-1 * r */
    cp_lfvector(s, r, numverts);

    delta_old = delta_new;
    delta_new = dot_lfvector(r, s, numverts);

    add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts);
    filter(c, S);

    conjgrad_loopcount++;
  }

#  ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== dV ====\n");
  print_lvector(ldV, numverts);
  printf("========\n");
#  endif

  del_lfvector(fB);
  del_lfvector(AdV);
  del_lfvector(r);
  del_lfvector(c);
  del_lfvector(q);
  del_lfvector(s);

  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS :
                                                             BPH_SOLVER_NO_CONVERGENCE;
  result->iterations = conjgrad_loopcount;
  result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;

  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}

#  if 0
// block diagonalizer
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
  unsigned int i = 0;

  // Take only the diagonal blocks of A
  // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < lA[0].vcount; i++) {
    // block diagonalizer
    cp_fmatrix(P[i].m, lA[i].m);
    inverse_fmatrix(Pinv[i].m, P[i].m);
  }
}

#    if 0
// version 1.3
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
  float conjgrad_epsilon = 0.0001;  // 0.2 is dt for steps=5
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p, numverts);

  delta0 = deltaNew * sqrt(conjgrad_epsilon);

#      ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#      endif

  while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
    iterations++;

    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);

    alpha = deltaNew / dot_lfvector(p, s, numverts);

    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);

    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);

    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);

    deltaOld = deltaNew;

    deltaNew = dot_lfvector(r, h, numverts);

    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);

    filter(p, S);
  }

#      ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
#      endif

  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);

  printf("iterations: %d\n", iterations);

  return iterations < conjgrad_looplimit;
}
#    endif

// version 1.4
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv,
                           fmatrix3x3 *bigI)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0;
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  lfVector *bhat = create_lfvector(numverts);
  lfVector *btemp = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  initdiag_bfmatrix(bigI, I);
  sub_bfmatrix_Smatrix(bigI, bigI, S);

  // x = Sx_0+(I-S)z
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  // b_hat = S(b-A(I-S)z)
  mul_bfmatrix_lfvector(r, lA, z);
  mul_bfmatrix_lfvector(bhat, bigI, r);
  sub_lfvector_lfvector(bhat, lB, bhat, numverts);

  // r = S(b-Ax)
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  // p = SP^-1r
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  // delta0 = bhat^TP^-1bhat
  mul_prevfmatrix_lfvector(btemp, Pinv, bhat);
  delta0 = dot_lfvector(bhat, btemp, numverts);

  // deltaNew = r^TP
  deltaNew = dot_lfvector(r, p, numverts);

#    if 0
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p, numverts);

  delta0 = deltaNew * sqrt(conjgrad_epsilon);
#    endif

#    ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#    endif

  tol = (0.01 * 0.2);

  while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) {
    iterations++;

    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);

    alpha = deltaNew / dot_lfvector(p, s, numverts);

    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);

    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);

    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);

    deltaOld = deltaNew;

    deltaNew = dot_lfvector(r, h, numverts);

    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);

    filter(p, S);
  }

#    ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
#    endif

  del_lfvector(btemp);
  del_lfvector(bhat);
  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);

  // printf("iterations: %d\n", iterations);

  return iterations < conjgrad_looplimit;
}
#  endif

/* One implicit Euler velocity step:
 *   A = M - dt * dFdV - dt^2 * dFdX   (via subadd_bfmatrixS_bfmatrixS)
 *   B = dt * F + dt^2 * (dFdX * V)
 * then solve A * dV = B with the filtered CG solver above. */
bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result)
{
  unsigned int numverts = data->dFdV[0].vcount;

  lfVector *dFdXmV = create_lfvector(numverts);
  zero_lfvector(data->dV, numverts);

  cp_bfmatrix(data->A, data->M);

  subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt));

  mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V);

  add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts);

#  ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#  endif

  /* Conjugate gradient algorithm to solve Ax=b.
*/
  cg_filtered(data->dV, data->A, data->B, data->z, data->S, result);

  // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI);
#  ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered calc time: %f\n", (float)(end - start));
#  endif

  // advance velocities
  add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts);

  del_lfvector(dFdXmV);

  return result->status == BPH_SOLVER_SUCCESS;
}

/* Advance positions by the newly solved velocities: Xnew = X + dt * Vnew. */
bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
  int numverts = data->M[0].vcount;

  // advance positions
  add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);

  return true;
}

/* Commit the step: copy the new position/velocity buffers into the current ones. */
void BPH_mass_spring_apply_result(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  cp_lfvector(data->X, data->Xnew, numverts);
  cp_lfvector(data->V, data->Vnew, numverts);
}

/* Set the diagonal mass block for one vertex to mass * identity. */
void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
  unit_m3(data->M[index].m);
  mul_m3_fl(data->M[index].m, mass);
}

/* Store a vertex's root-frame rotation; identity when CLOTH_ROOT_FRAME is off. */
void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
#  ifdef CLOTH_ROOT_FRAME
  copy_m3_m3(data->tfm[index].m, tfm);
#  else
  unit_m3(data->tfm[index].m);
  (void)tfm;
#  endif
}

/* Store world-space position and velocity, converted into the root frame. */
void BPH_mass_spring_set_motion_state(Implicit_Data *data,
                                      int index,
                                      const float x[3],
                                      const float v[3])
{
  world_to_root_v3(data, index, data->X[index], x);
  world_to_root_v3(data, index, data->V[index], v);
}

void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->X[index], x);
}

void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->V[index], v);
}

/* Read back world-space position and/or velocity; either output may be NULL. */
void BPH_mass_spring_get_motion_state(struct Implicit_Data *data,
                                      int index,
                                      float x[3],
                                      float v[3])
{
  if (x) {
    root_to_world_v3(data, index, x, data->X[index]);
  }
  if (v) {
    root_to_world_v3(data, index, v, data->V[index]);
  }
}

void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
root_to_world_v3(data, index, x, data->X[index]);
}

void BPH_mass_spring_get_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->V[index]);
}

void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->Xnew[index]);
}

void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->Xnew[index], x);
}

void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->Vnew[index]);
}

void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->Vnew[index], v);
}

/* -------------------------------- */

/* Reserve the next off-diagonal (spring) block slot coupling vertices v1 and
 * v2 in every spring-carrying matrix, and return its array index. */
static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
  int s = data->M[0].vcount + data->num_blocks; /* index from array start */
  BLI_assert(s < data->M[0].vcount + data->M[0].scount);
  ++data->num_blocks;

  /* tfm and S don't have spring entries (diagonal blocks only) */
  init_fmatrix(data->bigI + s, v1, v2);
  init_fmatrix(data->M + s, v1, v2);
  init_fmatrix(data->dFdX + s, v1, v2);
  init_fmatrix(data->dFdV + s, v1, v2);
  init_fmatrix(data->A + s, v1, v2);
  init_fmatrix(data->P + s, v1, v2);
  init_fmatrix(data->Pinv + s, v1, v2);

  return s;
}

/* Reset all constraints: identity filters S and zero target velocities z. */
void BPH_mass_spring_clear_constraints(Implicit_Data *data)
{
  int i, numverts = data->S[0].vcount;
  for (i = 0; i < numverts; i++) {
    unit_m3(data->S[i].m);
    zero_v3(data->z[i]);
  }
}

/* Fully pin a vertex (0 degrees of freedom): zero filter block, and set the
 * target velocity z to dV converted into the root frame. */
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
  zero_m3(data->S[index].m);

  world_to_root_v3(data, index, data->z[index], dV);
}

/* Constrain a vertex to one degree of freedom: the filter becomes
 * I - c1*c1^T - c2*c2^T (both directions removed), and dV is accumulated into
 * the target velocity z. */
void BPH_mass_spring_add_constraint_ndof1(
    Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
  float m[3][3], p[3], q[3], u[3], cmat[3][3];

  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
sub_m3_m3m3(m, I, cmat);

  world_to_root_v3(data, index, q, c2);
  mul_fvectorT_fvector(cmat, q, q);
  sub_m3_m3m3(m, m, cmat);

  /* XXX not sure but multiplication should work here */
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Constrain a vertex to two degrees of freedom: the filter becomes
 * I - c1*c1^T (only direction c1 removed); dV accumulates into z. */
void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data,
                                          int index,
                                          const float c1[3],
                                          const float dV[3])
{
  float m[3][3], p[3], u[3], cmat[3][3];

  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);

  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Zero the force accumulator and both force Jacobians; resets the spring
 * block counter so springs can be re-registered for this step. */
void BPH_mass_spring_clear_forces(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  zero_lfvector(data->F, numverts);
  init_bfmatrix(data->dFdX, ZERO);
  init_bfmatrix(data->dFdV, ZERO);

  data->num_blocks = 0;
}

/* Fictitious forces from a non-inertial root frame (Euler, Coriolis and
 * centrifugal terms), including their position/velocity Jacobians.
 * No-op unless compiled with CLOTH_ROOT_FRAME. */
void BPH_mass_spring_force_reference_frame(Implicit_Data *data,
                                           int index,
                                           const float acceleration[3],
                                           const float omega[3],
                                           const float domega_dt[3],
                                           float mass)
{
#  ifdef CLOTH_ROOT_FRAME
  float acc[3], w[3], dwdt[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float euler[3], coriolis[3], centrifugal[3], rotvel[3];
  float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];

  world_to_root_v3(data, index, acc, acceleration);
  world_to_root_v3(data, index, w, omega);
  world_to_root_v3(data, index, dwdt, domega_dt);

  cross_v3_v3v3(euler, dwdt, data->X[index]);
  cross_v3_v3v3(coriolis, w, data->V[index]);
  mul_v3_fl(coriolis, 2.0f);
  cross_v3_v3v3(rotvel, w, data->X[index]);
  cross_v3_v3v3(centrifugal, w, rotvel);

  sub_v3_v3v3(f, acc, euler);
  sub_v3_v3(f, coriolis);
  sub_v3_v3(f, centrifugal);

  mul_v3_fl(f, mass); /* F = m * a */

  cross_v3_identity(deuler, dwdt);
  cross_v3_identity(dcoriolis, w);
  mul_m3_fl(dcoriolis, 2.0f);
  cross_v3_identity(drotvel, w);
  cross_m3_v3m3(dcentrifugal, w, drotvel);

  add_m3_m3m3(dfdx, deuler, dcentrifugal);
negate_m3(dfdx);
  mul_m3_fl(dfdx, mass);

  copy_m3_m3(dfdv, dcoriolis);
  negate_m3(dfdv);
  mul_m3_fl(dfdv, mass);

  add_v3_v3(data->F[index], f);
  add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
  add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
#  else
  (void)data;
  (void)index;
  (void)acceleration;
  (void)omega;
  (void)domega_dt;
#  endif
}

/* Gravity on one vertex: F += m * g, with g rotated into the root frame. */
void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
  /* force = mass * acceleration (in this case: gravity) */
  float f[3];
  world_to_root_v3(data, index, f, g);
  mul_v3_fl(f, mass);

  add_v3_v3(data->F[index], f);
}

/* Linear drag on all vertices: F += -drag * V, dFdV += -drag * I. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  int i, numverts = data->M[0].vcount;
  for (i = 0; i < numverts; i++) {
    float tmp[3][3];

    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[i], data->V[i], -drag);

    copy_m3_m3(tmp, I);
    mul_m3_fl(tmp, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
  }
}

/* Accumulate an externally supplied force and its Jacobians on one vertex,
 * all rotated into the root frame first. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);

  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}

/* Unit normal into `nor`, returns the triangle area (half the cross product). */
static float calc_nor_area_tri(float nor[3],
                               const float v1[3],
                               const float v2[3],
                               const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);
  cross_v3_v3v3(nor, n1, n2);
  return normalize_v3(nor) / 2.0f;
}

/* XXX does not support force jacobians yet,
 * since the effector system does not provide them either. */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  int vs[3] = {v1, v2, v3};
  float win[3], nor[3], area;
  float factor, base_force;
  float force[3];

  /* calculate face normal and area
*/
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = effector_scale * area / 3.0f;

  /* Calculate wind pressure at each vertex by projecting the wind field on the normal. */
  for (int i = 0; i < 3; i++) {
    world_to_root_v3(data, vs[i], win, winvec[vs[i]]);

    force[i] = dot_v3v3(win, nor);
  }

  /* Compute per-vertex force values from local pressures.
   * From integrating the pressure over the triangle and deriving
   * equivalent vertex forces, it follows that:
   *
   * force[idx] = (sum(pressure) + pressure[idx]) * area / 12
   *
   * Effectively, 1/4 of the pressure acts just on its vertex,
   * while 3/4 is split evenly over all three. */
  mul_v3_fl(force, factor / 4.0f);

  base_force = force[0] + force[1] + force[2];

  /* add pressure to each of the face verts */
  madd_v3_v3fl(data->F[v1], nor, base_force + force[0]);
  madd_v3_v3fl(data->F[v2], nor, base_force + force[1]);
  madd_v3_v3fl(data->F[v3], nor, base_force + force[2]);
}

/* Like face wind, but the effector already supplies full force vectors per
 * vertex instead of a scalar wind field projected on the normal. */
void BPH_mass_spring_force_face_extern(
    Implicit_Data *data, int v1, int v2, int v3, const float (*forcevec)[3])
{
  const float effector_scale = 0.02f;
  int vs[3] = {v1, v2, v3};
  float nor[3], area;
  float factor, base_force[3];
  float force[3][3];

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = effector_scale * area / 3.0f;

  /* Compute common and per-vertex force vectors from the original inputs. */
  zero_v3(base_force);

  for (int i = 0; i < 3; i++) {
    world_to_root_v3(data, vs[i], force[i], forcevec[vs[i]]);

    mul_v3_fl(force[i], factor / 4.0f);
    add_v3_v3(base_force, force[i]);
  }

  /* Apply the common and vertex components to all vertices.
*/
  for (int i = 0; i < 3; i++) {
    add_v3_v3(force[i], base_force);

    add_v3_v3(data->F[vs[i]], force[i]);
  }
}

/* Signed volume of the tetrahedron spanned by a triangle and the origin. */
float BPH_tri_tetra_volume_signed_6x(Implicit_Data *data, int v1, int v2, int v3)
{
  /* The result will be 6x the volume */
  return volume_tri_tetrahedron_signed_v3_6x(data->X[v1], data->X[v2], data->X[v3]);
}

/* Area of the triangle (v1, v2, v3); the computed normal is discarded. */
float BPH_tri_area(struct Implicit_Data *data, int v1, int v2, int v3)
{
  float nor[3];

  return calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
}

/* Pressure force on a triangle: a common pressure plus optional per-vertex
 * pressures, applied along the face normal and scaled by per-vertex weights. */
void BPH_mass_spring_force_pressure(Implicit_Data *data,
                                    int v1,
                                    int v2,
                                    int v3,
                                    float common_pressure,
                                    const float *vertex_pressure,
                                    const float weights[3])
{
  float nor[3], area;
  float factor, base_force;
  float force[3];

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* The force is calculated and split up evenly for each of the three face verts */
  factor = area / 3.0f;
  base_force = common_pressure * factor;

  /* Compute per-vertex force values from local pressures.
   * From integrating the pressure over the triangle and deriving
   * equivalent vertex forces, it follows that:
   *
   * force[idx] = (sum(pressure) + pressure[idx]) * area / 12
   *
   * Effectively, 1/4 of the pressure acts just on its vertex,
   * while 3/4 is split evenly over all three.
*/
  if (vertex_pressure) {
    copy_v3_fl3(force, vertex_pressure[v1], vertex_pressure[v2], vertex_pressure[v3]);
    mul_v3_fl(force, factor / 4.0f);

    base_force += force[0] + force[1] + force[2];
  }
  else {
    zero_v3(force);
  }

  /* add pressure to each of the face verts */
  madd_v3_v3fl(data->F[v1], nor, (base_force + force[0]) * weights[0]);
  madd_v3_v3fl(data->F[v2], nor, (base_force + force[1]) * weights[1]);
  madd_v3_v3fl(data->F[v3], nor, (base_force + force[2]) * weights[2]);
}

/* Wind force on one end of a cylindrical edge segment: combines the drag on
 * the lateral cross-section and on the end cap, along the wind direction. */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);

  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }

  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);

  mul_v3_v3fl(f, wind, density * cross_section);
}

/* Wind on a hair/edge segment: evaluate edge_wind_vertex at both endpoints
 * with their own radii and accumulate into the force vector. */
void BPH_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];

  sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
  length = normalize_v3(dir);

  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);

  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}

/* Direct wind force on a single vertex: F += density * wind. */
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */

  float wind[3];
  float f[3];

  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}

BLI_INLINE
void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
  //
  // return  ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);

  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}

/* unused */
#  if 0
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  // inner spring damping   vel is the relative velocity  of the endpoints.
  //  return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
  mul_fvectorT_fvector(to, dir, dir);
  sub_fmatrix_fmatrix(to, I, to);
  mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
#  endif

/* Velocity Jacobian of the damping force: -damping * dir * dir^T. */
BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
  // derivative of force wrt velocity
  outerproduct(to, dir, dir);
  mul_m3_fl(to, -damping);
}

/* Quartic bending-force polynomial in x = length / L (Choi & Ko model). */
BLI_INLINE float fb(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  float xxxx = xxx * x;
  return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f);
}

/* Derivative of fb() with respect to x = length / L. */
BLI_INLINE float fbderiv(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f);
}

/* Bending force magnitude: the larger of the polynomial term kb*fb and the
 * linear term cb*(length - L). */
BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return fbstar_fl;
  }
  else {
    return tempfb_fl;
  }
}

// function to calculate bending spring force (taken from Choi & Ko)
/* Jacobian (slope) matching whichever branch fbstar() selects. */
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return -cb;
  }
  else {
    return -kb * fbderiv(length, L);
  }
}

/* calculate elongation */
BLI_INLINE bool spring_length(Implicit_Data *data,
                              int i,
                              int j,
float r_extent[3],
                              float r_dir[3],
                              float *r_length,
                              float r_vel[3])
{
  /* Extent, unit direction, length and relative velocity of the edge (i, j).
   * Near-degenerate edges get a zero direction; always returns true here
   * (the tearing early-out below is compiled out). */
  sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
  sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
  *r_length = len_v3(r_extent);

  if (*r_length > ALMOST_ZERO) {
#  if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        // cut spring!
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
#  endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }

  return true;
}

/* Scatter a spring force f and its Jacobians into the global system:
 * +/- on the two vertex diagonal blocks, - on the shared off-diagonal block
 * freshly reserved via BPH_mass_spring_add_block(). */
BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);

  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);

  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}

/* Structural spring: tension above rest length (optionally clamped), and an
 * optional compression response; returns false when the spring is inactive. */
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* This code computes not only the force, but also its derivative.
     Zero derivative effectively disables the spring for the implicit solver.
     Thus length > restlen makes cloth unconstrained at the start of simulation.
*/
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;

    damping = damping_tension;

    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);

    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression. */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */

    damping = damping_compression;

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    return false;
  }

  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);

  apply_spring(data, i, j, f, dfdx, dfdv);

  return true;
}

/* See "Stable but Responsive Cloth" (Choi, Ko 2005) */
/* Bending spring: only active under compression (length < restlen);
 * damping is not supported (zero velocity Jacobian). */
bool BPH_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  if (length < restlen) {
    float f[3], dfdx[3][3], dfdv[3][3];

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));

    /* XXX damping not supported */
    zero_m3(dfdv);

    apply_spring(data, i, j, f, dfdx, dfdv);

    return true;
  }
  else {
    return false;
  }
}

/* Average of `len` vectors selected by the index list `inds`. */
BLI_INLINE void poly_avg(lfVector *data, const int *inds, int len, float r_avg[3])
{
  float fact = 1.0f / (float)len;

  zero_v3(r_avg);

  for (int i = 0; i < len; i++) {
    madd_v3_v3fl(r_avg, data[inds[i]], fact);
  }
}

/* Normal of the triangle formed by vertices i, j and the centroid of `inds`. */
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float mid[3];

  poly_avg(data, inds, len, mid);

  normal_tri_v3(r_dir, data[i], data[j], mid);
}

BLI_INLINE void edge_avg(lfVector *data,
int i, int j, float r_avg[3])
{
  /* Midpoint of edge (i, j): component-wise average of the two positions. */
  r_avg[0] = (data[i][0] + data[j][0]) * 0.5f;
  r_avg[1] = (data[i][1] + data[j][1]) * 0.5f;
  r_avg[2] = (data[i][2] + data[j][2]) * 0.5f;
}

/* Normalized direction of edge (i, j), pointing from j towards i. */
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}

/* Signed bend angle between dir_a and dir_b around the axis dir_e:
 * atan2 of the sine (cross product projected onto dir_e) and the cosine. */
BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
  float cos, sin;
  float tmp[3];

  cos = dot_v3v3(dir_a, dir_b);

  cross_v3_v3v3(tmp, dir_a, dir_b);
  sin = dot_v3v3(tmp, dir_e);

  return atan2f(sin, cos);
}

/* Evaluates the current state of an angular spring across edge (i, j):
 * r_dir_a / r_dir_b receive the normals of the two adjacent polygons
 * (vertex index lists i_a / i_b of length len_a / len_b), *r_angle the
 * signed bend angle around the edge, and r_vel_a / r_vel_b the average
 * polygon velocities expressed relative to the edge midpoint velocity. */
BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];

  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);

  edge_norm(data->X, i, j, dir_e);

  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);

  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);

  edge_avg(data->V, i, j, vel_e);

  /* Make the polygon velocities relative to the edge velocity. */
  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}

/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation".
*/
/* Applies an angular (bending) spring force across edge (i, j) between the two
 * adjacent polygons given by vertex index lists i_a / i_b (len_a / len_b
 * entries).  restang is the rest angle; stiffness and damping are the spring
 * coefficients.  Forces are accumulated directly into data->F.
 * Always returns true (the force is unconditionally applied). */
bool BPH_mass_spring_force_spring_angular(Implicit_Data *data,
                                          int i,
                                          int j,
                                          int *i_a,
                                          int *i_b,
                                          int len_a,
                                          int len_b,
                                          float restang,
                                          float stiffness,
                                          float damping)
{
  float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
  float f_a[3], f_b[3], f_e[3];
  float force;
  int x;

  /* Measure the current bend angle, poly normals and relative velocities. */
  spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);

  /* spring force: proportional to the deviation from the rest angle */
  force = stiffness * (angle - restang);

  /* damping force: opposes the poly velocities along their normals */
  force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));

  /* Spread the force evenly over each polygon's vertices (divide by count). */
  mul_v3_v3fl(f_a, dir_a, force / len_a);
  mul_v3_v3fl(f_b, dir_b, force / len_b);

  for (x = 0; x < len_a; x++) {
    add_v3_v3(data->F[i_a[x]], f_a);
  }
  for (x = 0; x < len_b; x++) {
    add_v3_v3(data->F[i_b[x]], f_b);
  }

  /* Counterforce on the two edge vertices: half of the combined poly force
   * is subtracted from each, balancing the forces added above. */
  mul_v3_v3fl(f_a, dir_a, force * 0.5f);
  mul_v3_v3fl(f_b, dir_b, force * 0.5f);
  add_v3_v3v3(f_e, f_a, f_b);

  sub_v3_v3(data->F[i], f_e);
  sub_v3_v3(data->F[j], f_e);

  return true;
}

/* Jacobian of a direction vector.
 * Basically the part of the differential orthogonal to the direction,
 * inversely proportional to the length of the edge.
* * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij */ BLI_INLINE void spring_grad_dir( Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3]) { float length; sub_v3_v3v3(edge, data->X[j], data->X[i]); length = normalize_v3_v3(dir, edge); if (length > ALMOST_ZERO) { outerproduct(grad_dir, dir, dir); sub_m3_m3m3(grad_dir, I, grad_dir); mul_m3_fl(grad_dir, 1.0f / length); } else { zero_m3(grad_dir); } } BLI_INLINE void spring_hairbend_forces(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, const float dx[3], const float dv[3], float r_f[3]) { float edge_ij[3], dir_ij[3]; float edge_jk[3], dir_jk[3]; float vel_ij[3], vel_jk[3], vel_ortho[3]; float f_bend[3], f_damp[3]; float fk[3]; float dist[3]; zero_v3(fk); sub_v3_v3v3(edge_ij, data->X[j], data->X[i]); if (q == i) { sub_v3_v3(edge_ij, dx); } if (q == j) { add_v3_v3(edge_ij, dx); } normalize_v3_v3(dir_ij, edge_ij); sub_v3_v3v3(edge_jk, data->X[k], data->X[j]); if (q == j) { sub_v3_v3(edge_jk, dx); } if (q == k) { add_v3_v3(edge_jk, dx); } normalize_v3_v3(dir_jk, edge_jk); sub_v3_v3v3(vel_ij, data->V[j], data->V[i]); if (q == i) { sub_v3_v3(vel_ij, dv); } if (q == j) { add_v3_v3(vel_ij, dv); } sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); if (q == j) { sub_v3_v3(vel_jk, dv); } if (q == k) { add_v3_v3(vel_jk, dv); } /* bending force */ sub_v3_v3v3(dist, goal, edge_jk); mul_v3_v3fl(f_bend, dist, stiffness); add_v3_v3(fk, f_bend); /* damping force */ madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); mul_v3_v3fl(f_damp, vel_ortho, damping); sub_v3_v3(fk, f_damp); copy_v3_v3(r_f, fk); } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdx[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], 
dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f); copy_v3_v3(dfdx[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f); sub_v3_v3(dfdx[a], f); for (b = 0; b < 3; b++) { dfdx[a][b] /= delta; } } } /* Finite Differences method for estimating the jacobian of the force */ BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data, int i, int j, int k, const float goal[3], float stiffness, float damping, int q, float dfdv[3][3]) { const float delta = 0.00001f; // TODO find a good heuristic for this float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3]; float f[3]; int a, b; zero_m3(dvec_null); unit_m3(dvec_pos); mul_m3_fl(dvec_pos, delta * 0.5f); copy_m3_m3(dvec_neg, dvec_pos); negate_m3(dvec_neg); /* XXX TODO offset targets to account for position dependency */ for (a = 0; a < 3; a++) { spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f); copy_v3_v3(dfdv[a], f); spring_hairbend_forces( data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f); sub_v3_v3(dfdv[a], f); for (b = 0; b < 3; b++) { dfdv[a][b] /= delta; } } } /* Angular spring that pulls the vertex toward the local target * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a) */ bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data, int i, int j, int k, const float target[3], float stiffness, float damping) { float goal[3]; float fj[3], fk[3]; float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3]; const float vecnull[3] = {0.0f, 0.0f, 
0.0f}; int block_ij = BPH_mass_spring_add_block(data, i, j); int block_jk = BPH_mass_spring_add_block(data, j, k); int block_ik = BPH_mass_spring_add_block(data, i, k); world_to_root_v3(data, j, goal, target); spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk); negate_v3_v3(fj, fk); /* counterforce */ spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj); spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk); copy_m3_m3(dfj_dxi, dfk_dxi); negate_m3(dfj_dxi); copy_m3_m3(dfj_dxj, dfk_dxj); negate_m3(dfj_dxj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj); spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk); copy_m3_m3(dfj_dvi, dfk_dvi); negate_m3(dfj_dvi); copy_m3_m3(dfj_dvj, dfk_dvj); negate_m3(dfj_dvj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj); add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk); add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi); add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj); add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi); /* XXX analytical calculation of derivatives below is incorrect. * This proved to be difficult, but for now just using the finite difference method for * estimating the jacobians should be sufficient. 
*/ # if 0 float edge_ij[3], dir_ij[3], grad_dir_ij[3][3]; float edge_jk[3], dir_jk[3], grad_dir_jk[3][3]; float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3]; float target[3]; float tmp[3][3]; float fi[3], fj[3], fk[3]; float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3]; float dfdvi[3][3]; // TESTING damping = 0.0f; zero_v3(fi); zero_v3(fj); zero_v3(fk); zero_m3(dfi_dxi); zero_m3(dfj_dxi); zero_m3(dfk_dxi); zero_m3(dfk_dxj); zero_m3(dfk_dxk); /* jacobian of direction vectors */ spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij); spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk); sub_v3_v3v3(vel_jk, data->V[k], data->V[j]); /* bending force */ mul_v3_v3fl(target, dir_ij, restlen); sub_v3_v3v3(dist, target, edge_jk); mul_v3_v3fl(fk, dist, stiffness); /* damping force */ madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk)); madd_v3_v3fl(fk, vel_jk_ortho, damping); /* XXX this only holds true as long as we assume straight rest shape! * eventually will become a bit more involved since the opposite segment * gets its own target, under condition of having equal torque on both sides. 
*/ copy_v3_v3(fi, fk); /* counterforce on the middle point */ sub_v3_v3(fj, fi); sub_v3_v3(fj, fk); /* === derivatives === */ madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen); madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen); madd_m3_m3fl(dfk_dxj, I, stiffness); madd_m3_m3fl(dfk_dxk, I, -stiffness); copy_m3_m3(dfi_dxi, dfk_dxk); negate_m3(dfi_dxi); /* dfj_dfi == dfi_dfj due to symmetry, * dfi_dfj == dfk_dfj due to fi == fk * XXX see comment above on future bent rest shapes */ copy_m3_m3(dfj_dxi, dfk_dxj); /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */ sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi); sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj); /* add forces and jacobians to the solver data */ add_v3_v3(data->F[i], fi); add_v3_v3(data->F[j], fj); add_v3_v3(data->F[k], fk); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi); add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj); add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk); add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi); add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj); add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi); # endif return true; } bool BPH_mass_spring_force_spring_goal(Implicit_Data *data, int i, const float goal_x[3], const float goal_v[3], float stiffness, float damping) { float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3]; float f[3], dfdx[3][3], dfdv[3][3]; /* goal is in world space */ world_to_root_v3(data, i, root_goal_x, goal_x); world_to_root_v3(data, i, root_goal_v, goal_v); sub_v3_v3v3(extent, root_goal_x, data->X[i]); sub_v3_v3v3(vel, root_goal_v, data->V[i]); length = normalize_v3_v3(dir, extent); if (length > ALMOST_ZERO) { mul_v3_v3fl(f, dir, stiffness * length); // Ascher & Boxman, p.21: Damping only during elonglation // something wrong with it... 
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir)); dfdx_spring(dfdx, dir, length, 0.0f, stiffness); dfdv_damp(dfdv, dir, damping); add_v3_v3(data->F[i], f); add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx); add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv); return true; } else { return false; } } #endif /* IMPLICIT_SOLVER_BLENDER */
DRB114-if-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* When if() evaluates to true, this program has data races due to a true
 * (loop-carried, read-after-write) dependence within the loop at line 65.
 * Data race pair: a[i+1]@66:5 vs. a[i]@66:12
 * NOTE(review): this is a DataRaceBench benchmark kernel — the dependence in
 * the second loop is intentional and must NOT be "fixed" or parallelized. */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <omp.h>

int main(int argc, char *argv[])
{
  int i;
  int len = 100; /* fixed problem size */
  int a[100];

/* Initialization: each iteration writes a distinct a[i], so this loop is
 * safely parallel. */
#pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    a[i] = i;
  }

  srand((time(((void *)0)))); /* seed RNG; (void *)0 is NULL */

  /* Loop-carried true dependence: a[i + 1] reads the a[i] written by the
   * previous iteration, so iterations must execute in order. */
  for (i = 0; i <= len - 1 - 1; i += 1) {
    a[i + 1] = a[i] + 1;
  }

  printf("a[50]=%d\n",a[50]);
  return 0;
}
version1_2.c
// Compile with: // // // To specify the number of bodies in the world, the program optionally accepts // an integer as its first command line argument. #include <time.h> #include <sys/times.h> #include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <X11/Xlib.h> #include <unistd.h> #include "omp.h" #define WIDTH 1024 #define HEIGHT 768 // default number of bodies #define DEF_NUM_BODIES 200 // gravitational constant #define GRAV 10.0 // initial velocities are scaled by this value #define V_SCALAR 20.0 // initial masses are scaled by this value #define M_SCALAR 5.0 // radius scalar #define R_SCALAR 3 // coefficient of restitution determines the elasticity of a collision: C_REST = [0,1] // if C_REST = 0 -> perfectly inelastic (particles stick together) // if C_REST = 1 -> perfectly elastic (no loss of speed) #define C_REST 0.5 // set the iteration times #define iteration_times 100 // Must set 0 if run on Pi #define NOT_RUN_ON_PI 1 struct body { double x, y; // position double m; // mass }; struct speed{ double vx,vy; }; struct world { struct body *bodies; int num_bodies; }; struct speed* speed; double * radius; clock_t total_time = 0; //total_time.sec = 0; //total_time.usec = 0; /* This function initializes each particle's mass, velocity and position */ struct world *create_world(int num_bodies) { struct world *world = malloc(sizeof(struct world)); speed =malloc(sizeof(struct speed)*num_bodies); radius=malloc(sizeof(double)*num_bodies); world->num_bodies = num_bodies; world->bodies = malloc(sizeof(struct body) * num_bodies); int i = 0; double x; double y; double rc; int min_dim = (WIDTH < HEIGHT) ? 
WIDTH : HEIGHT;
  /* Rejection sampling: draw random positions until one falls inside the
   * circle inscribed in the window, then assign it a velocity perpendicular
   * to the radius vector (tangential), scaled by V_SCALAR / rc. */
  while (i < num_bodies) {
    x = drand48() * WIDTH;
    y = drand48() * HEIGHT;
    rc = sqrt((WIDTH / 2 - x) * (WIDTH / 2 - x) + (y - HEIGHT / 2) * (y - HEIGHT / 2));
    if (rc <= min_dim / 2) {
      world->bodies[i].x = x;
      world->bodies[i].y = y;
      speed[i].vx = V_SCALAR * (y - HEIGHT / 2) / rc;
      speed[i].vy = V_SCALAR * (WIDTH / 2 - x) / rc;
      /* Mass shrinks as the random draw grows; radius scales with
       * sqrt(mass), so disc area is proportional to mass. */
      world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR;
      radius[i] = sqrt(world->bodies[i].m / M_PI) * R_SCALAR;
      i++;
    }
  }
  return world;
}

// set the foreground color given RGB values between 0..255.
// Components are clamped to [0, 255] and packed into an Xlib pixel value.
void set_color(Display *disp, GC gc, int r, int g, int b) {
    unsigned long int p;
    if (r < 0)
        r = 0;
    else if (r > 255)
        r = 255;
    if (g < 0)
        g = 0;
    else if (g > 255)
        g = 255;
    if (b < 0)
        b = 0;
    else if (b > 255)
        b = 255;
    /* pack as 0xRRGGBB */
    p = (r << 16) | (g << 8) | (b);
    XSetForeground(disp, gc, p);
}

/* This function updates the screen with the new positions of each particle */
void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) {
    int i;
    double x, y, r, r2;
    // we turn off aliasing for faster draws
    set_color(disp, gc, 255, 255, 255);
    XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT);
    for (i = 0; i < world->num_bodies; i++) {
        r = radius[i];
        /* (x, y) is the top-left corner of the disc's bounding box. */
        x = world->bodies[i].x - r;
        y = world->bodies[i].y - r;
        r2 = r + r;
        // draw body
        set_color(disp, gc, 255 * 7 / 10, 255 * 7 / 10, 255 * 7 / 10);
        XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64);
        set_color(disp, gc, 0, 0, 0);
        XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64);
    }
}

/* Reflects bodies off the window edges: the reflected velocity component is
 * scaled by the coefficient of restitution C_REST, and the body is clamped
 * back inside the window. */
void collision_step(struct world *world) {
    int a, b;
    double r, x, y, vx, vy;
    // Impose screen boundaries by reversing direction if body is off screen
    for (a = 0; a < world->num_bodies; a++) {
        r = radius[a];
        x = world->bodies[a].x;
        y = world->bodies[a].y;
        vx = speed[a].vx;
        vy = speed[a].vy;
        if (x - r < 0) {
            // left edge
            if (vx < 0) {
                speed[a].vx = -C_REST * vx;
            }
            world->bodies[a].x = r;
        }
        else if (x + r > WIDTH) {
            // right edge
            if (vx > 0) {
                speed[a].vx = -C_REST * vx;
            }
            world->bodies[a].x = WIDTH - r;
        }
        if
(y - r < 0) { // bottom edge if (vy < 0) { speed[a].vy = -C_REST * vy; } world->bodies[a].y = r; } else if (y + r > HEIGHT) { // top edge if (vy > 0) { speed[a].vy = -C_REST * vy; } world->bodies[a].y = HEIGHT - r; } } } void position_step(struct world *world, double time_res) { /* The forces array stores the x and y components of the total force acting * on each body. The forces are index like this: * F on body i in the x dir = F_x[i] * F on body i in the y dir = F_y[i] */ double *force_x = (double *) malloc(sizeof(double) * world->num_bodies); double *force_y = (double *) malloc(sizeof(double) * world->num_bodies); // initialize all forces to zero force_x = memset(force_x, 0, sizeof(double) * world->num_bodies); force_y = memset(force_y, 0, sizeof(double) * world->num_bodies); /* Compute the net force on each body */ #pragma omp parallel for for (int i = 0; i < world->num_bodies; i++) { double d, d_cubed, diff_x, diff_y; for (int j = 0; j < world->num_bodies; j++) { if (i == j) { continue; } // Compute the x and y distances and total distance d between // bodies i and j diff_x = world->bodies[j].x - world->bodies[i].x; diff_y = world->bodies[j].y - world->bodies[i].y; d = sqrt((diff_x * diff_x) + (diff_y * diff_y)); if (d < 25) { d = 25; } d_cubed = d * d * d; // Add force due to j to total force on i force_x[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_x; force_y[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_y; } } #pragma omp barrier // Update the velocity and position of each body #pragma omp parallel for for (int i = 0; i < world->num_bodies; i++) { // Update velocities speed[i].vx += force_x[i] * time_res / world->bodies[i].m; speed[i].vy += force_y[i] * time_res / world->bodies[i].m; // Update positions world->bodies[i].x += speed[i].vx * time_res; world->bodies[i].y += speed[i].vy * time_res; } } void step_world(struct world *world, double time_res) { struct tms ttt; clock_t start, end; start = 
times(&ttt); position_step(world, time_res); end = times(&ttt); total_time += end - start; collision_step(world); } /* Main method runs initialize() and update() */ int main(int argc, char **argv) { //total_time.tv_sec = 0; //total_time.tv_usec = 0; /* get num bodies from the command line */ int num_bodies, threads; num_bodies = DEF_NUM_BODIES; threads = 1; if (argc == 2) { num_bodies = atoi(argv[1]); }; int thread_list[13]={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 40}; FILE* fstream=fopen("outdata","a+"); fprintf(fstream,"Universe has %d bodies\n", num_bodies); for (int i = 0; i < 13; ++i) { threads = thread_list[i]; printf("Universe has %d bodies. %d Threads\n", num_bodies, threads); omp_set_num_threads(threads); /* set up the universe */ time_t cur_time; time(&cur_time); srand48((long) cur_time); // seed the RNG used in create_world struct world *world = create_world(num_bodies); /* set up graphics using Xlib */ #if NOT_RUN_ON_PI Display *disp = XOpenDisplay(NULL); int scr = DefaultScreen(disp); Window win = XCreateSimpleWindow( disp, RootWindow(disp, scr), 0, 0, WIDTH, HEIGHT, 0, BlackPixel(disp, scr), WhitePixel(disp, scr)); XStoreName(disp, win, "N-Body Simulator"); Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr), WIDTH, HEIGHT, DefaultDepth(disp, scr)); GC gc = XCreateGC(disp, back_buf, 0, 0); // Make sure we're only looking for messages about closing the window Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0); XSetWMProtocols(disp, win, &del_window, 1); XSelectInput(disp, win, StructureNotifyMask); XMapWindow(disp, win); XEvent event; // wait until window is mapped while (1) { XNextEvent(disp, &event); if (event.type == MapNotify) { break; } } #endif struct timespec delay = {0, 1000000000 / 60}; // for 60 FPS struct timespec remaining; double delta_t = 0.1; int ii; total_time=0; for (ii = 0; ii < iteration_times; ii++) { // check if the window has been closed #if NOT_RUN_ON_PI if (XCheckTypedEvent(disp, ClientMessage, &event)) { break; } 
// we first draw to the back buffer then copy it to the front (`win`) draw_world(disp, back_buf, gc, world); XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0); #endif step_world(world, delta_t); //if you want to watch the process in 60 FPS //nanosleep(&delay, &remaining); } // printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000); fprintf(fstream,"%d %lfs\n", threads,(double) total_time / (sysconf(_SC_CLK_TCK))); #if NOT_RUN_ON_PI XFreeGC(disp, gc); XFreePixmap(disp, back_buf); XDestroyWindow(disp, win); XCloseDisplay(disp); #endif } fclose(fstream); return 0; }
pool_ut.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <gflags/gflags.h> #include <gtest/gtest.h> #include <string> #include <vector> #include "lite/core/context.h" #include "lite/core/profile/timer.h" #include "lite/operators/op_params.h" #include "lite/tests/utils/fill_data.h" #include "lite/tests/utils/naive_math_impl.h" #include "lite/tests/utils/print_info.h" #include "lite/tests/utils/tensor_utils.h" #ifdef LITE_WITH_ARM #include "lite/kernels/arm/pool_compute.h" #ifdef ENABLE_ARM_FP16 typedef __fp16 float16_t; #endif #endif // LITE_WITH_ARM DEFINE_int32(power_mode, 3, "power mode: " "0 for POWER_HIGH;" "1 for POWER_LOW;" "2 for POWER_FULL;" "3 for NO_BIND"); DEFINE_int32(threads, 1, "threads num"); DEFINE_int32(warmup, 0, "warmup times"); DEFINE_int32(repeats, 1, "repeats times"); DEFINE_bool(basic_test, false, "do all tests"); DEFINE_bool(check_result, true, "check the result"); DEFINE_int32(batch, 1, "batch size"); DEFINE_int32(in_channel, 32, "input channel"); DEFINE_int32(in_height, 112, "input height"); DEFINE_int32(in_width, 112, "input width"); DEFINE_int32(kernel_h, 3, "kernel height"); DEFINE_int32(kernel_w, 3, "kernel width"); DEFINE_int32(pad_h, 1, "pad height"); DEFINE_int32(pad_w, 1, "pad width"); DEFINE_int32(stride_h, 1, "stride height"); DEFINE_int32(stride_w, 1, "stride width"); DEFINE_bool(ceil_mode, true, "do ceil_mode"); DEFINE_bool(flag_global, true, "global 
pooling"); DEFINE_bool(exclusive, true, "do exclusive"); DEFINE_bool(adaptive, false, "no do adaptive"); DEFINE_bool(use_quantizer, false, "no do use_quantizer"); DEFINE_string(pooling_type, "max", "do max pooling"); typedef paddle::lite::DDim DDim; typedef paddle::lite::Tensor Tensor; typedef paddle::lite::operators::PoolParam PoolParam; using paddle::lite::profile::Timer; DDim compute_out_dim(const DDim& dim_in, const paddle::lite::operators::PoolParam& param) { DDim dim_out = dim_in; auto kernel_h = param.ksize[0]; auto kernel_w = param.ksize[1]; auto h = dim_in[2]; auto w = dim_in[3]; auto paddings = *param.paddings; int stride_h = param.strides[0]; int stride_w = param.strides[1]; bool ceil_mode = param.ceil_mode; bool flag_global = param.global_pooling; int hout = 1; int wout = 1; if (!flag_global) { if (!ceil_mode) { hout = (h - kernel_h + paddings[0] + paddings[1]) / stride_h + 1; wout = (w - kernel_w + paddings[2] + paddings[3]) / stride_w + 1; } else { hout = (h - kernel_h + paddings[0] + paddings[1] + stride_h - 1) / stride_h + 1; wout = (w - kernel_w + paddings[2] + paddings[3] + stride_w - 1) / stride_w + 1; } } dim_out[2] = hout; dim_out[3] = wout; return dim_out; } //! for float, dtype1 and type2 is float //! 
for int8, dytpe1 is char, dtype2 is int template <typename Dtype1, typename Dtype2> void pooling_basic(const Dtype1* din, Dtype2* dout, int num, int chout, int hout, int wout, int chin, int hin, int win, const std::vector<int>& ksize, const std::vector<int>& strides, const std::vector<int>& paddings, bool global_pooling, bool exclusive, bool adaptive, bool ceil_mode, bool use_quantizer, const std::string& pooling_type) { // no need to pad input tensor, border is zero pad inside this function memset(dout, 0, num * chout * hout * wout * sizeof(Dtype2)); int kernel_h = ksize[0]; int kernel_w = ksize[1]; int stride_h = strides[0]; int stride_w = strides[1]; int pad_h = paddings[0]; int pad_w = paddings[2]; int size_channel_in = win * hin; int size_channel_out = wout * hout; if (global_pooling) { if (pooling_type == "max") { // Pooling_max for (int n = 0; n < num; ++n) { Dtype2* dout_batch = dout + n * chout * size_channel_out; const Dtype1* din_batch = din + n * chin * size_channel_in; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int c = 0; c < chout; ++c) { const Dtype1* din_ch = din_batch + c * size_channel_in; // in address Dtype1 tmp1 = din_ch[0]; for (int i = 0; i < size_channel_in; ++i) { Dtype1 tmp2 = din_ch[i]; tmp1 = tmp1 > tmp2 ? 
tmp1 : tmp2; } dout_batch[c] = tmp1; } } } else if (pooling_type == "avg") { // Pooling_average_include_padding // Pooling_average_exclude_padding for (int n = 0; n < num; ++n) { Dtype2* dout_batch = dout + n * chout * size_channel_out; const Dtype1* din_batch = din + n * chin * size_channel_in; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int c = 0; c < chout; ++c) { const Dtype1* din_ch = din_batch + c * size_channel_in; // in address Dtype1 sum = 0.f; for (int i = 0; i < size_channel_in; ++i) { sum += din_ch[i]; } dout_batch[c] = sum / size_channel_in; } } } else { LOG(FATAL) << "unsupported pooling type: " << pooling_type; } } else { for (int ind_n = 0; ind_n < num; ++ind_n) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int ind_c = 0; ind_c < chin; ++ind_c) { for (int ind_h = 0; ind_h < hout; ++ind_h) { int sh = ind_h * stride_h; int eh = sh + kernel_h; sh = (sh - pad_h) < 0 ? 0 : sh - pad_h; eh = (eh - pad_h) > hin ? hin : eh - pad_h; for (int ind_w = 0; ind_w < wout; ++ind_w) { int sw = ind_w * stride_w; int ew = sw + kernel_w; sw = (sw - pad_w) < 0 ? 0 : sw - pad_w; ew = (ew - pad_w) > win ? win : ew - pad_w; Dtype1 result = static_cast<Dtype1>(0); int dst_ind = (ind_n * chout + ind_c) * size_channel_out + ind_h * wout + ind_w; for (int kh = sh; kh < eh; ++kh) { for (int kw = sw; kw < ew; ++kw) { int src_ind = (ind_n * chin + ind_c) * size_channel_in + kh * win + kw; if (kh == sh && kw == sw) { result = din[src_ind]; } else { if (pooling_type == "max") { result = result >= din[src_ind] ? result : din[src_ind]; } else if (pooling_type == "avg") { result += din[src_ind]; } } } } if (pooling_type == "avg") { if (exclusive) { int div = (ew - sw) * (eh - sh); div = div > 0 ? div : 1; result /= div; } else { int bh = kernel_h; int bw = kernel_w; if (ew == win) { bw = (sw + kernel_w) >= (win + paddings[3]) ? 
(win + paddings[3]) : (sw + kernel_w); bw -= sw; if ((sw - pad_w) < 0 && (sw + kernel_w) > (win + paddings[3])) { bw += pad_w; } } if (eh == hin) { bh = (sh + kernel_h) >= (hin + paddings[1]) ? (hin + paddings[1]) : (sh + kernel_h); bh -= sh; if ((sh - pad_h) < 0 && (sh + kernel_h) > (hin + paddings[1])) { bh += pad_h; } } result /= bh * bw; } } dout[dst_ind] = result; } } } } } } void print_pool_success_or_fail_info(std::string op_name, bool has_success, const DDim dim_in, const DDim dim_out, std::vector<int> ksize, std::vector<int> pads, std::vector<int> strides, bool flag_global, std::string pooling_type, bool ceil_mode, bool exclusive, int th, int cls) { if (has_success) { VLOG(4) << op_name << " input: " << dim_in << ", output: " << dim_out << ", kernel dim: " << ksize[0] << ", " << ksize[1] << ", pad: " << pads[0] << ", " << pads[1] << ", " << pads[2] << ", " << pads[3] << ", stride: " << strides[0] << ", " << strides[1] << ", global_pooling: " << (flag_global ? "global" : "false") << ", pooling_type: " << pooling_type << ", ceil_mode: " << (ceil_mode ? "true" : "false") << ", exclusive: " << (exclusive ? "true" : "false") << ", threads: " << th << ", power_mode: " << cls << " successed!!\n"; } else { LOG(FATAL) << op_name << " input: " << dim_in << ", output: " << dim_out << ", kernel dim: " << ksize[0] << ", " << ksize[1] << ", pad: " << pads[0] << ", " << pads[1] << ", " << pads[2] << ", " << pads[3] << ", stride: " << strides[0] << ", " << strides[1] << ", global_pooling: " << (flag_global ? "global" : "false") << ", pooling_type: " << pooling_type << ", ceil_mode: " << (ceil_mode ? "true" : "false") << ", exclusive: " << (exclusive ? "true" : "false") << ", threads: " << th << ", power_mode: " << cls << " failed!!\n"; } }
GB_kroner.c
//------------------------------------------------------------------------------ // GB_kroner: Kronecker product, C = kron (A,B) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = kron(A,B) where op determines the binary multiplier to use. The type of // A and B are compatible with the x and y inputs of z=op(x,y), but can be // different. The type of C is the type of z. C is hypersparse if either A // or B are hypersparse. // FUTURE: this would be faster with built-in types and operators. // FUTURE: at most one thread is used for each vector of C=kron(A,B). The // matrix C is normally very large, but if both A and B are n-by-1, then C is // n^2-by-1 and only a single thread is used. A better method for this case // would construct vectors of C in parallel. // FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not // accounted for in the parallel load-balancing. 
#include "GB_kron.h"

// free the temporary sparse copies of A and B (used only if the inputs
// arrived in bitmap form and had to be converted)
#define GB_FREE_WORK        \
{                           \
    GB_phbix_free (A2) ;    \
    GB_phbix_free (B2) ;    \
}

// free all workspace and the output C (used on any error path via GB_OK)
#define GB_FREE_ALL         \
{                           \
    GB_FREE_WORK ;          \
    GB_phbix_free (C) ;     \
}

//------------------------------------------------------------------------------
// GB_kroner: C = kron (A,B), where op(x,y) defines the multiply operator.
// C has the type of z for z = op(x,y); A and B are typecast to the x and y
// inputs of op.  C is full if both A and B are dense, hypersparse if either
// A or B is hypersparse, and sparse otherwise (never bitmap).
//------------------------------------------------------------------------------

GrB_Info GB_kroner                  // C = kron (A,B)
(
    GrB_Matrix C,                   // output matrix (static header)
    const bool C_is_csc,            // desired format of C
    const GrB_BinaryOp op,          // multiply operator
    const GrB_Matrix A_in,          // input matrix
    bool A_is_pattern,              // true if values of A are not used
    const GrB_Matrix B_in,          // input matrix
    bool B_is_pattern,              // true if values of B are not used
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    // A2 and B2 are workspace matrices with static headers, used only when
    // the bitmap inputs must be copied and converted to sparse (below).
    struct GB_Matrix_opaque A2_header, B2_header ;
    GrB_Matrix A2 = GB_clear_static_header (&A2_header) ;
    GrB_Matrix B2 = GB_clear_static_header (&B2_header) ;

    ASSERT_MATRIX_OK (A_in, "A_in for kron (A,B)", GB0) ;
    ASSERT_MATRIX_OK (B_in, "B_in for kron (A,B)", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for kron (A,B)", GB0) ;

    //--------------------------------------------------------------------------
    // finish any pending work
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT (A_in) ;
    GB_MATRIX_WAIT (B_in) ;

    //--------------------------------------------------------------------------
    // bitmap case: create sparse copies of A and B if they are bitmap
    //--------------------------------------------------------------------------

    // The kernel below walks Ap/Ai (and Bp/Bi) directly, so bitmap inputs are
    // first duplicated and converted to sparse.  The originals are not
    // modified.

    GrB_Matrix A = A_in ;
    if (GB_IS_BITMAP (A))
    { 
        GBURBLE ("A:") ;
        GB_OK (GB_dup2 (&A2, A, true, A->type, Context)) ;  // static header
        GB_OK (GB_convert_bitmap_to_sparse (A2, Context)) ;
        A = A2 ;
    }

    GrB_Matrix B = B_in ;
    if (GB_IS_BITMAP (B))
    { 
        GBURBLE ("B:") ;
        GB_OK (GB_dup2 (&B2, B, true, B->type, Context)) ;  // static header
        GB_OK (GB_convert_bitmap_to_sparse (B2, Context)) ;
        B = B2 ;
    }

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = A->h ;
    const int64_t *restrict Ai = A->i ;
    const GB_void *restrict Ax = A_is_pattern ? NULL : ((GB_void *) A->x) ;
    const int64_t asize = A->type->size ;
    const int64_t avlen = A->vlen ;
    const int64_t avdim = A->vdim ;
    int64_t anvec = A->nvec ;
    int64_t anz = GB_NNZ (A) ;

    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    const int64_t *restrict Bi = B->i ;
    const GB_void *restrict Bx = B_is_pattern ? NULL : ((GB_void *) B->x) ;
    const int64_t bsize = B->type->size ;
    const int64_t bvlen = B->vlen ;
    const int64_t bvdim = B->vdim ;
    int64_t bnvec = B->nvec ;
    int64_t bnz = GB_NNZ (B) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    // total work: one op per entry of C (anz*bnz), plus one pass over the
    // cnvec = anvec*bnvec vectors of C
    double work = ((double) anz) * ((double) bnz)
                + (((double) anvec) * ((double) bnvec)) ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (work, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate the output matrix C
    //--------------------------------------------------------------------------

    // C has the same type as z for the multiply operator, z=op(x,y)

    // cvlen = avlen*bvlen, etc; GB_Index_multiply checks for overflow
    GrB_Index cvlen, cvdim, cnzmax, cnvec ;

    bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ;
    ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ;
    ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ;
    ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ;
    ASSERT (ok) ;

    // C is hypersparse if either A or B are hypersparse.  It is never bitmap.
    bool C_is_hyper = (cvdim > 1) && (Ah != NULL || Bh != NULL) ;
    bool C_is_full = GB_is_dense (A) && GB_is_dense (B) ;
    int sparsity = C_is_full ? GxB_FULL :
        ((C_is_hyper) ? GxB_HYPERSPARSE : GxB_SPARSE) ;
    GB_OK (GB_new_bix (&C, true, // full, sparse, or hyper; static header
        op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc, C_is_csc,
        sparsity, true, B->hyper_switch, cnvec, cnzmax, true, Context)) ;

    //--------------------------------------------------------------------------
    // get C and the operator
    //--------------------------------------------------------------------------

    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ch = C->h ;
    int64_t *restrict Ci = C->i ;
    GB_void *restrict Cx = (GB_void *) C->x ;
    int64_t *restrict Cx_int64 = NULL ;
    int32_t *restrict Cx_int32 = NULL ;
    const int64_t csize = C->type->size ;
    GxB_binary_function fmult = op->function ;
    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    GB_cast_function cast_A = NULL, cast_B = NULL ;
    if (!A_is_pattern)
    { 
        cast_A = GB_cast_factory (op->xtype->code, A->type->code) ;
    }
    if (!B_is_pattern)
    { 
        cast_B = GB_cast_factory (op->ytype->code, B->type->code) ;
    }
    // positional ops write integer indices to C directly, with an offset of
    // 0 or 1 (for the *I1/*J1 variants); fmult is not called in that case
    int64_t offset = 0 ;
    if (op_is_positional)
    { 
        offset = GB_positional_offset (opcode) ;
        Cx_int64 = (int64_t *) Cx ;
        Cx_int32 = (int32_t *) Cx ;
    }
    bool is64 = (op->ztype == GrB_INT64) ;

    //--------------------------------------------------------------------------
    // compute the column counts of C, and C->h if C is hypersparse
    //--------------------------------------------------------------------------

    int64_t kC ;
    if (!C_is_full)
    {
        // vector kC of C corresponds to vector kA of A and kB of B, with
        // kC = kA * bnvec + kB; nnz (C (:,jC)) = aknz * bknz
        #pragma omp parallel for num_threads(nthreads) schedule(guided)
        for (kC = 0 ; kC < cnvec ; kC++)
        {
            int64_t kA = kC / bnvec ;
            int64_t kB = kC % bnvec ;

            // get A(:,jA), the (kA)th vector of A
            int64_t jA = GBH (Ah, kA) ;
            int64_t aknz = (Ap == NULL) ? avlen : (Ap [kA+1] - Ap [kA]) ;
            // get B(:,jB), the (kB)th vector of B
            int64_t jB = GBH (Bh, kB) ;
            int64_t bknz = (Bp == NULL) ? bvlen : (Bp [kB+1] - Bp [kB]) ;
            // determine # entries in C(:,jC), the (kC)th vector of C
            // int64_t kC = kA * bnvec + kB ;
            if (!C_is_full)
            { 
                Cp [kC] = aknz * bknz ;
            }
            if (C_is_hyper)
            { 
                Ch [kC] = jA * bvdim + jB ;
            }
        }

        // convert the counts in Cp to a cumulative sum (the vector pointers)
        GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads, Context) ;
        if (C_is_hyper) C->nvec = cnvec ;
    }
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // C = kron (A,B)
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(guided)
    for (kC = 0 ; kC < cnvec ; kC++)
    {
        int64_t kA = kC / bnvec ;
        int64_t kB = kC % bnvec ;

        // get B(:,jB), the (kB)th vector of B
        int64_t jB = GBH (Bh, kB) ;
        int64_t pB_start = GBP (Bp, kB, bvlen) ;
        int64_t pB_end   = GBP (Bp, kB+1, bvlen) ;
        // FIX: was (pB_start - pB_end), which is the negated count; it only
        // worked because bknz is compared against zero and never used again.
        int64_t bknz = pB_end - pB_start ;
        if (bknz == 0) continue ;
        GB_void bwork [GB_VLA(bsize)] ;

        // get C(:,jC), the (kC)th vector of C
        // int64_t kC = kA * bnvec + kB ;
        int64_t pC = GBP (Cp, kC, cvlen) ;

        // get A(:,jA), the (kA)th vector of A
        int64_t jA = GBH (Ah, kA) ;
        int64_t pA_start = GBP (Ap, kA, avlen) ;
        int64_t pA_end   = GBP (Ap, kA+1, avlen) ;
        GB_void awork [GB_VLA(asize)] ;

        for (int64_t pA = pA_start ; pA < pA_end ; pA++)
        {
            // awork = A(iA,jA), typecasted to op->xtype
            int64_t iA = GBI (Ai, pA, avlen) ;
            int64_t iAblock = iA * bvlen ;
            if (!A_is_pattern) cast_A (awork, Ax +(pA*asize), asize) ;
            for (int64_t pB = pB_start ; pB < pB_end ; pB++)
            {
                // bwork = B(iB,jB), typecasted to op->ytype
                int64_t iB = GBI (Bi, pB, bvlen) ;
                if (!B_is_pattern) cast_B (bwork, Bx +(pB*bsize), bsize) ;
                // C(iC,jC) = A(iA,jA) * B(iB,jB)
                if (!C_is_full)
                { 
                    int64_t iC = iAblock + iB ;
                    Ci [pC] = iC ;
                }
                if (op_is_positional)
                {
                    // positional binary operator
                    switch (opcode)
                    {
                        case GB_FIRSTI_opcode   : 
                            // z = first_i(A(iA,jA),y) == iA
                        case GB_FIRSTI1_opcode  : 
                            // z = first_i1(A(iA,jA),y) == iA+1
                            if (is64)
                            { 
                                Cx_int64 [pC] = iA + offset ;
                            }
                            else
                            { 
                                Cx_int32 [pC] = (int32_t) (iA + offset) ;
                            }
                            break ;
                        case GB_FIRSTJ_opcode   : 
                            // z = first_j(A(iA,jA),y) == jA
                        case GB_FIRSTJ1_opcode  : 
                            // z = first_j1(A(iA,jA),y) == jA+1
                            if (is64)
                            { 
                                Cx_int64 [pC] = jA + offset ;
                            }
                            else
                            { 
                                Cx_int32 [pC] = (int32_t) (jA + offset) ;
                            }
                            break ;
                        case GB_SECONDI_opcode  : 
                            // z = second_i(x,B(iB,jB)) == iB
                        case GB_SECONDI1_opcode : 
                            // z = second_i1(x,B(iB,jB)) == iB+1
                            if (is64)
                            { 
                                Cx_int64 [pC] = iB + offset ;
                            }
                            else
                            { 
                                Cx_int32 [pC] = (int32_t) (iB + offset) ;
                            }
                            break ;
                        case GB_SECONDJ_opcode  : 
                            // z = second_j(x,B(iB,jB)) == jB
                        case GB_SECONDJ1_opcode : 
                            // z = second_j1(x,B(iB,jB)) == jB+1
                            if (is64)
                            { 
                                Cx_int64 [pC] = jB + offset ;
                            }
                            else
                            { 
                                Cx_int32 [pC] = (int32_t) (jB + offset) ;
                            }
                            break ;
                        default: ;
                    }
                }
                else
                { 
                    // standard binary operator
                    fmult (Cx +(pC*csize), awork, bwork) ;
                }
                pC++ ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (C, "C=kron(A,B)", GB0) ;
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
}
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/feature.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/morphology-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. 
%
%  The format of the CannyEdgeImage method is:
%
%      Image *CannyEdgeImage(const Image *image,const double radius,
%        const double sigma,const double lower_percent,
%        const double upper_percent,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the gaussian smoothing filter.
%
%    o sigma: the sigma of the gaussian smoothing filter.
%
%    o lower_percent: percentage of edge pixels in the lower threshold.
%
%    o upper_percent: percentage of edge pixels in the upper threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Per-pixel gradient record stored in the canny_cache matrix: magnitude and
  (post-suppression) intensity of the gradient, the quantized gradient
  orientation, and the pixel coordinates (x,y are used only when an element
  doubles as a worklist entry in TraceEdges).
*/
typedef struct _CannyInfo
{
  double
    magnitude,
    intensity;

  int
    orientation;

  ssize_t
    x,
    y;
} CannyInfo;

/*
  Return MagickTrue if (x,y) lies inside the image bounds.
*/
static inline MagickBooleanType IsAuthenticPixel(const Image *image,
  const ssize_t x,const ssize_t y)
{
  if ((x < 0) || (x >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y < 0) || (y >= (ssize_t) image->rows))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Hysteresis tracing: starting from the strong-edge seed (x,y), mark it white
  and iteratively follow 8-connected neighbors whose gradient intensity is at
  least lower_threshold, marking each as an edge.  The pending neighbors are
  kept in an explicit stack of CannyInfo records stored in column 0 of
  canny_cache (NOTE(review): this reuses the gradient matrix as scratch
  worklist storage — the overwritten column-0 records appear to be expendable
  at this stage; confirm before reordering the passes).  Returns MagickFalse
  only on a cache/matrix access failure.
*/
static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view,
  MatrixInfo *canny_cache,const ssize_t x,const ssize_t y,
  const double lower_threshold,ExceptionInfo *exception)
{
  CannyInfo
    edge,
    pixel;

  MagickBooleanType
    status;

  register PixelPacket
    *q;

  register ssize_t
    i;

  /* mark the seed pixel as an edge (white) */
  q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  q->red=QuantumRange;
  q->green=QuantumRange;
  q->blue=QuantumRange;
  status=SyncCacheViewAuthenticPixels(edge_view,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  /* push the seed onto the worklist (slot 0) */
  if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  edge.x=x;
  edge.y=y;
  if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse)
    return(MagickFalse);
  /* i is the worklist depth; loop until the stack is empty */
  for (i=1; i != 0; )
  {
    ssize_t
      v;

    i--;
    status=GetMatrixElement(canny_cache,i,0,&edge);
    if (status == MagickFalse)
      return(MagickFalse);
    for (v=(-1); v <= 1; v++)
    {
      ssize_t
        u;

      for (u=(-1); u <= 1; u++)
      {
        if ((u == 0) && (v == 0))
          continue;
        if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse)
          continue;
        /*
          Not an edge if gradient value is below the lower threshold.
        */
        q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          return(MagickFalse);
        status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel);
        if (status == MagickFalse)
          return(MagickFalse);
        /* extend the edge only through still-black pixels above threshold */
        if ((GetPixelIntensity(edge_image,q) == 0.0) &&
            (pixel.intensity >= lower_threshold))
          {
            q->red=QuantumRange;
            q->green=QuantumRange;
            q->blue=QuantumRange;
            status=SyncCacheViewAuthenticPixels(edge_view,exception);
            if (status == MagickFalse)
              return(MagickFalse);
            /* push the neighbor so its own neighbors are visited later */
            edge.x+=u;
            edge.y+=v;
            status=SetMatrixElement(canny_cache,i,0,&edge);
            if (status == MagickFalse)
              return(MagickFalse);
            i++;
          }
      }
    }
  }
  return(MagickTrue);
}

MagickExport Image *CannyEdgeImage(const Image *image,const double radius,
  const double sigma,const double lower_percent,const double upper_percent,
  ExceptionInfo *exception)
{
#define CannyEdgeImageTag  "CannyEdge/Image"

  CacheView
    *edge_view;

  CannyInfo
    element;

  char
    geometry[MaxTextExtent];

  double
    lower_threshold,
    max,
    min,
    upper_threshold;

  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *canny_cache;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Filter out noise: Gaussian blur in two separable passes (0 and 90
    degrees), then reduce to grayscale.
  */
  (void) FormatLocaleString(geometry,MaxTextExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,1,
    kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  if (TransformImageColorspace(edge_image,GRAYColorspace) == MagickFalse)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(edge_image,DeactivateAlphaChannel);
  /*
    Find the intensity gradient of the image.
  */
  canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows,
    sizeof(CannyInfo),exception);
  if (canny_cache == (MatrixInfo *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  edge_view=AcquireVirtualCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* fetch a 2-row window (current row + next) for the 2x2 gradient kernel */
    p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2,
      exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      double
        dx,
        dy;

      register const PixelPacket
        *magick_restrict kernel_pixels;

      ssize_t
        v;

      /* 2x2 Roberts-style difference kernels for the x and y gradients */
      static double
        Gx[2][2] =
        {
          { -1.0,  +1.0 },
          { -1.0,  +1.0 }
        },
        Gy[2][2] =
        {
          { +1.0, +1.0 },
          { -1.0, -1.0 }
        };

      (void) ResetMagickMemory(&pixel,0,sizeof(pixel));
      dx=0.0;
      dy=0.0;
      kernel_pixels=p;
      for (v=0; v < 2; v++)
      {
        ssize_t
          u;

        for (u=0; u < 2; u++)
        {
          double
            intensity;

          intensity=GetPixelIntensity(edge_image,kernel_pixels+u);
          dx+=0.5*Gx[v][u]*intensity;
          dy+=0.5*Gy[v][u]*intensity;
        }
        kernel_pixels+=edge_image->columns+1;
      }
      pixel.magnitude=hypot(dx,dy);
      /*
        Quantize the gradient direction into 4 bins; the slope cutoffs are
        tan(67.5deg) ~= 2.414 and tan(22.5deg) ~= 0.414, i.e. the bin
        boundaries halfway between the four principal directions.
      */
      pixel.orientation=0;
      if (fabs(dx) > MagickEpsilon)
        {
          double
            slope;

          slope=dy/dx;
          if (slope < 0.0)
            {
              if (slope < -2.41421356237)
                pixel.orientation=0;
              else
                if (slope < -0.414213562373)
                  pixel.orientation=1;
                else
                  pixel.orientation=2;
            }
          else
            {
              if (slope > 2.41421356237)
                pixel.orientation=0;
              else
                if (slope > 0.414213562373)
                  pixel.orientation=3;
                else
                  pixel.orientation=2;
            }
        }
      if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse)
        continue;
      p++;
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Non-maxima suppression, remove pixels that are not considered to be part
    of an edge.  Also tracks the min/max surviving intensity to scale the
    hysteresis thresholds below, and blacks out the edge image.
  */
  progress=0;
  (void) GetMatrixElement(canny_cache,0,0,&element);
  max=element.intensity;
  min=element.intensity;
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(edge_image,edge_image,edge_image->rows,1)
#endif
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        alpha_pixel,
        beta_pixel,
        pixel;

      /* compare each pixel against its two neighbors along the gradient */
      (void) GetMatrixElement(canny_cache,x,y,&pixel);
      switch (pixel.orientation)
      {
        case 0:
        default:
        {
          /*
            0 degrees, north and south.
          */
          (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel);
          break;
        }
        case 1:
        {
          /*
            45 degrees, northwest and southeast.
          */
          (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel);
          break;
        }
        case 2:
        {
          /*
            90 degrees, east and west.
          */
          (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel);
          (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel);
          break;
        }
        case 3:
        {
          /*
            135 degrees, northeast and southwest.
          */
          (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel);
          (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel);
          break;
        }
      }
      /* suppress (zero) any pixel that is not a local gradient maximum */
      pixel.intensity=pixel.magnitude;
      if ((pixel.magnitude < alpha_pixel.magnitude) ||
          (pixel.magnitude < beta_pixel.magnitude))
        pixel.intensity=0;
      (void) SetMatrixElement(canny_cache,x,y,&pixel);
      /* min/max are shared across threads; serialize their update */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
      {
        if (pixel.intensity < min)
          min=pixel.intensity;
        if (pixel.intensity > max)
          max=pixel.intensity;
      }
      q->red=0;
      q->green=0;
      q->blue=0;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CannyEdgeImage)
#endif
        proceed=SetImageProgress(image,CannyEdgeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Estimate hysteresis threshold.
  */
  lower_threshold=lower_percent*(max-min)+min;
  upper_threshold=upper_percent*(max-min)+min;
  /*
    Hysteresis threshold: seed from pixels above the upper threshold and
    trace connected edges down to the lower threshold (serial pass).
  */
  edge_view=AcquireAuthenticCacheView(edge_image,exception);
  for (y=0; y < (ssize_t) edge_image->rows; y++)
  {
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    for (x=0; x < (ssize_t) edge_image->columns; x++)
    {
      CannyInfo
        pixel;

      register const PixelPacket
        *magick_restrict p;

      /*
        Edge if pixel gradient higher than upper threshold.
      */
      p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception);
      if (p == (const PixelPacket *) NULL)
        continue;
      status=GetMatrixElement(canny_cache,x,y,&pixel);
      if (status == MagickFalse)
        continue;
      if ((GetPixelIntensity(edge_image,p) == 0.0) &&
          (pixel.intensity >= upper_threshold))
        status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold,
          exception);
    }
  }
  edge_view=DestroyCacheView(edge_view);
  /*
    Free resources.
  */
  canny_cache=DestroyMatrixInfo(canny_cache);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C h a n n e l F e a t u r e s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageChannelFeatures() returns features for each channel in the image in
%  each of four directions (horizontal, vertical, left and right diagonals)
%  for the specified distance.  The features include the angular second
%  moment, contrast, correlation, sum of squares: variance, inverse difference
%  moment, sum average, sum variance, sum entropy, entropy, difference
%  variance, difference entropy, information measures of correlation 1,
%  information measures of correlation 2, and maximum correlation coefficient.
%  You can access the red channel contrast, for example, like this:
%
%      channel_features=GetImageChannelFeatures(image,1,exception);
%      contrast=channel_features[RedChannel].contrast[0];
%
%  Use MagickRelinquishMemory() to free the features buffer.
%
%  The format of the GetImageChannelFeatures method is:
%
%      ChannelFeatures *GetImageChannelFeatures(const Image *image,
%        const size_t distance,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o distance: the distance.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image, const size_t distance,ExceptionInfo *exception) { typedef struct _ChannelStatistics { DoublePixelPacket direction[4]; /* horizontal, vertical, left and right diagonals */ } ChannelStatistics; CacheView *image_view; ChannelFeatures *channel_features; ChannelStatistics **cooccurrence, correlation, *density_x, *density_xy, *density_y, entropy_x, entropy_xy, entropy_xy1, entropy_xy2, entropy_y, mean, **Q, *sum, sum_squares, variance; LongPixelPacket gray, *grays; MagickBooleanType status; register ssize_t i; size_t length; ssize_t y; unsigned int number_grays; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns < (distance+1)) || (image->rows < (distance+1))) return((ChannelFeatures *) NULL); length=CompositeChannels+1UL; channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_features,0,length* sizeof(*channel_features)); /* Form grays. 
*/ grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (LongPixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].opacity=(~0U); grays[i].index=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(p))].red= ScaleQuantumToMap(GetPixelRed(p)); grays[ScaleQuantumToMap(GetPixelGreen(p))].green= ScaleQuantumToMap(GetPixelGreen(p)); grays[ScaleQuantumToMap(GetPixelBlue(p))].blue= ScaleQuantumToMap(GetPixelBlue(p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index= ScaleQuantumToMap(GetPixelIndex(indexes+x)); if (image->matte != MagickFalse) grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity= ScaleQuantumToMap(GetPixelOpacity(p)); p++; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) ResetMagickMemory(&gray,0,sizeof(gray)); for (i=0; i 
<= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) grays[(ssize_t) gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[(ssize_t) gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[(ssize_t) gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].index != ~0U) grays[(ssize_t) gray.index++].index=grays[i].index; if (image->matte != MagickFalse) if (grays[i].opacity != ~0U) grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.index > number_grays) number_grays=gray.index; if (image->matte != MagickFalse) if (gray.opacity > number_grays) number_grays=gray.opacity; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) 
density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) ResetMagickMemory(&correlation,0,sizeof(correlation)); (void) ResetMagickMemory(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) ResetMagickMemory(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) ResetMagickMemory(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) ResetMagickMemory(&mean,0,sizeof(mean)); (void) ResetMagickMemory(sum,0,number_grays*sizeof(*sum)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); (void) ResetMagickMemory(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) ResetMagickMemory(&entropy_x,0,sizeof(entropy_x)); (void) ResetMagickMemory(&entropy_xy,0,sizeof(entropy_xy)); (void) ResetMagickMemory(&entropy_xy1,0,sizeof(entropy_xy1)); (void) ResetMagickMemory(&entropy_xy2,0,sizeof(entropy_xy2)); (void) ResetMagickMemory(&entropy_y,0,sizeof(entropy_y)); (void) ResetMagickMemory(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) 
|| (Q[i] == (ChannelStatistics *) NULL)) break; (void) ResetMagickMemory(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) ResetMagickMemory(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; ssize_t i, offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+ 2*distance,distance+2,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); p+=distance; indexes+=distance; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. 
*/ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p))) u++; while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue)) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x))) u++; while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset))) v++; cooccurrence[u][v].direction[i].index++; cooccurrence[v][u].direction[i].index++; } if (image->matte != MagickFalse) { u=0; v=0; while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p))) u++; while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity)) v++; cooccurrence[u][v].direction[i].opacity++; cooccurrence[v][u].direction[i].opacity++; } } p++; } } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; register ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].index*=normalize; if (image->matte != MagickFalse) cooccurrence[x][y].direction[i].opacity*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BlueChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].index* cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].opacity* cooccurrence[x][y].direction[i].opacity; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].index+=x*y* cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) correlation.direction[i].opacity+=x*y* cooccurrence[x][y].direction[i].opacity; /* Inverse Difference Moment. 
*/ channel_features[RedChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BlueChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1); if (image->matte != MagickFalse) channel_features[OpacityChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_xy[y+x+2].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; /* Entropy. */ channel_features[RedChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BlueChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].entropy[i]-= cooccurrence[x][y].direction[i].index* MagickLog10(cooccurrence[x][y].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].entropy[i]-= cooccurrence[x][y].direction[i].opacity* MagickLog10(cooccurrence[x][y].direction[i].opacity); /* Information Measures of Correlation. 
*/ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_x[x].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_y[y].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].index+=y*sum[y].direction[i].index; sum_squares.direction[i].index+=y*y*sum[y].direction[i].index; } if (image->matte != MagickFalse) { mean.direction[i].opacity+=y*sum[y].direction[i].opacity; sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity; } } /* Correlation: measure of linear-dependencies in the image. 
*/ channel_features[RedChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BlueChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].correlation[i]= (correlation.direction[i].index-mean.direction[i].index* mean.direction[i].index)/(sqrt(sum_squares.direction[i].index- (mean.direction[i].index*mean.direction[i].index))*sqrt( sum_squares.direction[i].index-(mean.direction[i].index* mean.direction[i].index))); if (image->matte != MagickFalse) channel_features[OpacityChannel].correlation[i]= (correlation.direction[i].opacity-mean.direction[i].opacity* mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity- (mean.direction[i].opacity*mean.direction[i].opacity))*sqrt( sum_squares.direction[i].opacity-(mean.direction[i].opacity* mean.direction[i].opacity))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. 
*/ channel_features[RedChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BlueChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_average[i]+= x*density_xy[x].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_average[i]+= x*density_xy[x].direction[i].opacity; /* Sum entropy. */ channel_features[RedChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BlueChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_entropy[i]-= density_xy[x].direction[i].index* MagickLog10(density_xy[x].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_entropy[i]-= density_xy[x].direction[i].opacity* MagickLog10(density_xy[x].direction[i].opacity); /* Sum variance. 
*/ channel_features[RedChannel].sum_variance[i]+= (x-channel_features[RedChannel].sum_entropy[i])* (x-channel_features[RedChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenChannel].sum_variance[i]+= (x-channel_features[GreenChannel].sum_entropy[i])* (x-channel_features[GreenChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BlueChannel].sum_variance[i]+= (x-channel_features[BlueChannel].sum_entropy[i])* (x-channel_features[BlueChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_variance[i]+= (x-channel_features[IndexChannel].sum_entropy[i])* (x-channel_features[IndexChannel].sum_entropy[i])* density_xy[x].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_variance[i]+= (x-channel_features[OpacityChannel].sum_entropy[i])* (x-channel_features[OpacityChannel].sum_entropy[i])* density_xy[x].direction[i].opacity; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=(y-mean.direction[i].index+1)* (y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)* (y-mean.direction[i].opacity+1)* cooccurrence[x][y].direction[i].opacity; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index* MagickLog10(cooccurrence[x][y].direction[i].index); if (image->matte != MagickFalse) entropy_xy.direction[i].opacity-= cooccurrence[x][y].direction[i].opacity*MagickLog10( cooccurrence[x][y].direction[i].opacity); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].index-=( cooccurrence[x][y].direction[i].index*MagickLog10( density_x[x].direction[i].index*density_y[y].direction[i].index)); if (image->matte != MagickFalse) entropy_xy1.direction[i].opacity-=( cooccurrence[x][y].direction[i].opacity*MagickLog10( density_x[x].direction[i].opacity* density_y[y].direction[i].opacity)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10( density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10( density_x[x].direction[i].green*density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* density_y[y].direction[i].blue*MagickLog10( 
density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].index-=(density_x[x].direction[i].index* density_y[y].direction[i].index*MagickLog10( density_x[x].direction[i].index*density_y[y].direction[i].index)); if (image->matte != MagickFalse) entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity* density_y[y].direction[i].opacity*MagickLog10( density_x[x].direction[i].opacity* density_y[y].direction[i].opacity)); } } channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BlueChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].index; if (image->matte != MagickFalse) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].opacity; } /* Compute more texture features. */ (void) ResetMagickMemory(&variance,0,sizeof(variance)); (void) ResetMagickMemory(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=density_xy[x].direction[i].index; if (image->matte != MagickFalse) variance.direction[i].opacity+=density_xy[x].direction[i].opacity; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].index+=density_xy[x].direction[i].index* density_xy[x].direction[i].index; if (image->matte != MagickFalse) sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity* density_xy[x].direction[i].opacity; /* Difference entropy. */ channel_features[RedChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BlueChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_entropy[i]-= density_xy[x].direction[i].index* MagickLog10(density_xy[x].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_entropy[i]-= density_xy[x].direction[i].opacity* MagickLog10(density_xy[x].direction[i].opacity); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].index-=(density_x[x].direction[i].index* MagickLog10(density_x[x].direction[i].index)); if (image->matte != MagickFalse) entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity* MagickLog10(density_x[x].direction[i].opacity)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].index-=(density_y[x].direction[i].index* MagickLog10(density_y[x].direction[i].index)); if (image->matte != MagickFalse) entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity* MagickLog10(density_y[x].direction[i].opacity)); } /* Difference variance. 
*/ channel_features[RedChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BlueChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].opacity)- (variance.direction[i].opacity*variance.direction[i].opacity))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].index)- (variance.direction[i].index*variance.direction[i].index))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BlueChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/ (entropy_x.direction[i].index > entropy_y.direction[i].index ? entropy_x.direction[i].index : entropy_y.direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/ (entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ? entropy_x.direction[i].opacity : entropy_y.direction[i].opacity); channel_features[RedChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BlueChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index- entropy_xy.direction[i].index))))); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity- entropy_xy.direction[i].opacity))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) ResetMagickMemory(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].index+=cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) pixel.direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index* cooccurrence[y][x].direction[i].index/ density_x[z].direction[i].index/density_y[x].direction[i].index; if (image->matte != MagickFalse) Q[z][y].direction[i].opacity+= cooccurrence[z][x].direction[i].opacity* cooccurrence[y][x].direction[i].opacity/ density_x[z].direction[i].opacity/ density_y[x].direction[i].opacity; } } channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red; 
channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green; channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].contrast[i]+=z*z* pixel.direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].contrast[i]+=z*z* pixel.direction[i].opacity; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BlueChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->matte != MagickFalse) channel_features[OpacityChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. */ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. 
The algorithm accumulates
%  counts for every white pixel for every possible orientation (for angles from
%  0 to 179 in 1 degree increments) and distance from the center of the image to
%  the corner (in 1 px increments) and stores the counts in an accumulator matrix
%  of angle vs distance.  The size of the accumulator is 180x(diagonal/2).  Next
%  it searches this space for peaks in counts and converts the locations of the
%  peaks to slope and intercept in the normal x,y input image space.  Use the
%  slope/intercepts to find the endpoints clipped to the bounds of the image.  The
%  lines are then drawn.  The counts are a measure of the length of the lines
%
%  The format of the HoughLineImage method is:
%
%      Image *HoughLineImage(const Image *image,const size_t width,
%        const size_t height,const size_t threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find line pairs as local maxima in this neighborhood.
%
%    o threshold: the line count threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer; an exact half rounds up (toward
    ceil()), e.g. 2.5 -> 3.0 and -2.5 -> -2.0.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

/*
  RenderHoughLines() opens the blob referenced by image_info, creates a
  columns x rows canvas scaled by the image resolution (relative to
  DefaultResolution), reads the drawing primitives from the blob (or, when no
  in-memory stream data is available, from the file named by the image
  filename) into draw_info->primitive, renders them with DrawImage(), and
  returns the rendered image.  Returns NULL if the blob cannot be opened or
  the canvas cannot be set up; the caller owns (and must destroy) the
  returned image.
*/
static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define BoundingBox "viewbox"

  DrawInfo
    *draw_info;

  Image
    *image;

  MagickBooleanType
    status;

  /*
    Open image.
  */
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
  /*
    Scale the drawing transform by the image resolution; a resolution of 0.0
    means "unset" and leaves the scale at 1.0.
  */
  draw_info->affine.sx=image->x_resolution == 0.0 ? 1.0 : image->x_resolution/
    DefaultResolution;
  draw_info->affine.sy=image->y_resolution == 0.0 ? 1.0 : image->y_resolution/
    DefaultResolution;
  image->columns=(size_t) (draw_info->affine.sx*image->columns);
  image->rows=(size_t) (draw_info->affine.sy*image->rows);
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Render drawing.
  */
  if (GetBlobStreamData(image) == (unsigned char *) NULL)
    draw_info->primitive=FileToString(image->filename,~0UL,exception);
  else
    {
      /* Copy the in-memory blob into a NUL-terminated primitive string. */
      draw_info->primitive=(char *) AcquireMagickMemory((size_t)
        GetBlobSize(image)+1);
      if (draw_info->primitive != (char *) NULL)
        {
          (void) CopyMagickMemory(draw_info->primitive,GetBlobStreamData(image),
            (size_t) GetBlobSize(image));
          draw_info->primitive[GetBlobSize(image)]='\0';
        }
    }
  (void) DrawImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

MagickExport Image *HoughLineImage(const Image *image,const size_t width,
  const size_t height,const size_t threshold,ExceptionInfo *exception)
{
#define HoughLineImageTag "HoughLine/Image"

  CacheView
    *image_view;

  char
    message[MaxTextExtent],
    path[MaxTextExtent];

  const char
    *artifact;

  double
    hough_height;

  Image
    *lines_image = NULL;

  ImageInfo
    *image_info;

  int
    file;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MatrixInfo
    *accumulator;

  PointInfo
    center;

  register ssize_t
    y;

  size_t
    accumulator_height,
    accumulator_width,
    line_count;

  /*
    Create the accumulator.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  accumulator_width=180;
  hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ?
image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. */ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { register ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HoughLineImage) #endif proceed=SetImageProgress(image,HoughLineImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MaxTextExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n", (double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { register ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? */ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- 
(image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MaxTextExtent, "line %g,%g %g,%g # %g\n",line.x1,line.y1,line.x2,line.y2,maxima); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. */ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MaxTextExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsMagickTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. 
*/ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. This new % x,y centroid is used as the center for a new window. This process iterates % until it converges and the final mean is replaces the (original window % center) pixel value. It repeats this process for the next pixel, etc., % until it processes all pixels in the image. Results are typically better with % colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr. % % The format of the MeanShiftImage method is: % % Image *MeanShiftImage(const Image *image,const size_t width, % const size_t height,const double color_distance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find pixels in this neighborhood. % % o color_distance: the color distance. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MeanShiftImage(const Image *image,const size_t width, const size_t height,const double color_distance,ExceptionInfo *exception) { #define MaxMeanShiftIterations 100 #define MeanShiftImageTag "MeanShift/Image" CacheView *image_view, *mean_view, *pixel_view; Image *mean_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); mean_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (mean_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse) { InheritException(exception,&mean_image->exception); mean_image=DestroyImage(mean_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); pixel_view=AcquireVirtualCacheView(image,exception); mean_view=AcquireAuthenticCacheView(mean_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status,progress) \ magick_number_threads(mean_image,mean_image,mean_image->rows,1) #endif for (y=0; y < (ssize_t) mean_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) mean_image->columns; x++) { MagickPixelPacket mean_pixel, 
previous_pixel; PointInfo mean_location, previous_location; register ssize_t i; GetMagickPixelPacket(image,&mean_pixel); SetMagickPixelPacket(image,p,indexes+x,&mean_pixel); mean_location.x=(double) x; mean_location.y=(double) y; for (i=0; i < MaxMeanShiftIterations; i++) { double distance, gamma; MagickPixelPacket sum_pixel; PointInfo sum_location; ssize_t count, v; sum_location.x=0.0; sum_location.y=0.0; GetMagickPixelPacket(image,&sum_pixel); previous_location=mean_location; previous_pixel=mean_pixel; count=0; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2))) { PixelPacket pixel; status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t) MagickRound(mean_location.x+u),(ssize_t) MagickRound( mean_location.y+v),&pixel,exception); distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+ (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+ (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue); if (distance <= (color_distance*color_distance)) { sum_location.x+=mean_location.x+u; sum_location.y+=mean_location.y+v; sum_pixel.red+=pixel.red; sum_pixel.green+=pixel.green; sum_pixel.blue+=pixel.blue; sum_pixel.opacity+=pixel.opacity; count++; } } } } gamma=1.0/count; mean_location.x=gamma*sum_location.x; mean_location.y=gamma*sum_location.y; mean_pixel.red=gamma*sum_pixel.red; mean_pixel.green=gamma*sum_pixel.green; mean_pixel.blue=gamma*sum_pixel.blue; mean_pixel.opacity=gamma*sum_pixel.opacity; distance=(mean_location.x-previous_location.x)* (mean_location.x-previous_location.x)+ (mean_location.y-previous_location.y)* (mean_location.y-previous_location.y)+ 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)* 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+ 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)* 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+ 
255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)* 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue); if (distance <= 3.0) break; } q->red=ClampToQuantum(mean_pixel.red); q->green=ClampToQuantum(mean_pixel.green); q->blue=ClampToQuantum(mean_pixel.blue); q->opacity=ClampToQuantum(mean_pixel.opacity); p++; q++; } if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MeanShiftImage) #endif proceed=SetImageProgress(image,MeanShiftImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } mean_view=DestroyCacheView(mean_view); pixel_view=DestroyCacheView(pixel_view); image_view=DestroyCacheView(image_view); return(mean_image); }
GxB_Vector_serialize.c
//------------------------------------------------------------------------------ // GxB_Vector_serialize: copy a vector into a serialized array of bytes //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // serialize a GrB_Vector into a blob of bytes // This method is similar to GxB_Matrix_serialize. Like that method, it // allocates the blob itself, and hands over the allocated space to the user // application. The blob must be freed by the same free function passed in to // GxB_init, or by the ANSI C11 free() if GrB_init was used. On input, the // blob_size need not be initialized; it is returned as the size of the blob as // allocated. // This method includes the descriptor as the last parameter, which allows // for the compression method to be selected, and controls the # of threads // used to create the blob. 
Example usage: /* void *blob = NULL ; GrB_Index blob_size = 0 ; GrB_Vector u, B = NULL ; // construct a vector u, then serialized it: GxB_Vector_serialize (&blob, &blob_size, u, NULL) ; // GxB mallocs the blob GxB_Vector_deserialize (&B, atype, blob, blob_size, NULL) ; free (blob) ; // user frees the blob */ #include "GB.h" #include "GB_serialize.h" GrB_Info GxB_Vector_serialize // serialize a GrB_Vector to a blob ( // output: void **blob_handle, // the blob, allocated on output GrB_Index *blob_size_handle, // size of the blob on output // input: GrB_Vector u, // vector to serialize const GrB_Descriptor desc // descriptor to select compression method // and to control # of threads used ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_Vector_serialize (&blob, &blob_size, u, desc)") ; GB_BURBLE_START ("GxB_Vector_serialize") ; GB_RETURN_IF_NULL (blob_handle) ; GB_RETURN_IF_NULL (blob_size_handle) ; GB_RETURN_IF_NULL_OR_FAULTY (u) ; GB_GET_DESCRIPTOR (info, desc, xx1, xx2, xx3, xx4, xx5, xx6, xx7) ; // get the compression method from the descriptor int method = (desc == NULL) ? GxB_DEFAULT : desc->compression ; //-------------------------------------------------------------------------- // serialize the vector //-------------------------------------------------------------------------- (*blob_handle) = NULL ; size_t blob_size = 0 ; info = GB_serialize ((GB_void **) blob_handle, &blob_size, (GrB_Matrix) u, method, Context) ; (*blob_size_handle) = (GrB_Index) blob_size ; GB_BURBLE_END ; #pragma omp flush return (info) ; }
inputParallelFor.c
#include <stdio.h>
#ifdef _OPENMP
#include "omp.h"
#endif

/* Number of rectangles in the midpoint-rule estimate of pi. */
static long num_steps = 10000000;
double step;

/*
 * Estimate pi by integrating 4/(1+x^2) over [0,1] with the midpoint rule.
 * The partial sums are accumulated in parallel via an OpenMP reduction.
 */
int main()
{
    double pi;
    double sum = 0.0;
    int k;

    step = 1.0 / (double) num_steps;

    #pragma omp parallel for reduction (+:sum)
    for (k = 1; k <= num_steps; k++) {
        /* Midpoint of the k-th subinterval; loop-local, hence thread-private. */
        double x = ((double) k - 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }

    pi = step * sum;
    printf("step:%e sum:%f PI=%.20f\n",step,sum, pi);
    return 0;
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4)); ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(16*t2-Nz+9,4)),2*t1+1),4*t1-4*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(8*t1+Ny+7,4)),floord(16*t2+Ny+3,4)),floord(16*t1-16*t2+Nz+Ny+5,4));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(16*t2-Nz-1011,1024)),ceild(4*t3-Ny-1011,1024));t4<=min(min(min(min(floord(4*Nt+Nx-9,1024),floord(8*t1+Nx+7,1024)),floord(16*t2+Nx+3,1024)),floord(4*t3+Nx-9,1024)),floord(16*t1-16*t2+Nz+Nx+5,1024));t4++) { for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(1024*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),Nt-1),2*t1+3),4*t2+2),t3-1),256*t4+254);t5++) { for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(1024*t4,4*t5+4); ubv=min(1024*t4+1023,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ 
(-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
libimagequant.c
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <stdint.h> #include <limits.h> #if !(defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199900L) && !(defined(_MSC_VER) && _MSC_VER >= 1800) #error "This program requires C99, e.g. -std=c99 switch in GCC or it requires MSVC 18.0 or higher." #error "Ignore torrent of syntax errors that may follow. It's only because compiler is set to use too old C version." #endif #ifdef _OPENMP #include <omp.h> #define LIQ_TEMP_ROW_WIDTH(img_width) (((img_width) | 15) + 1) /* keep alignment & leave space between rows to avoid cache line contention */ #else #define LIQ_TEMP_ROW_WIDTH(img_width) (img_width) #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #include "nearest.h" #include "blur.h" #include "kmeans.h" #define LIQ_HIGH_MEMORY_LIMIT (1<<26) /* avoid allocating buffers larger than 64MB */ // each structure has a pointer as a unique identifier that allows type checking at run time static const char liq_attr_magic[] = "liq_attr"; static const char liq_image_magic[] = "liq_image"; static const char liq_result_magic[] = "liq_result"; static const char liq_histogram_magic[] = "liq_histogram"; static const char liq_remapping_result_magic[] = "liq_remapping_result"; static const char liq_freed_magic[] = "free"; #define CHECK_STRUCT_TYPE(attr, kind) liq_crash_if_invalid_handle_pointer_given((const liq_attr*)attr, kind ## _magic) #define CHECK_USER_POINTER(ptr) liq_crash_if_invalid_pointer_given(ptr) struct liq_attr { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); double target_mse, max_mse, kmeans_iteration_limit; float min_opaque_val; unsigned int max_colors, 
max_histogram_entries; unsigned int min_posterization_output /* user setting */, min_posterization_input /* speed setting */; unsigned int kmeans_iterations, feedback_loop_trials; bool last_index_transparent, use_contrast_maps; unsigned char use_dither_map; unsigned char speed; unsigned char progress_stage1, progress_stage2, progress_stage3; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_log_callback_function *log_callback; void *log_callback_user_info; liq_log_flush_callback_function *log_flush_callback; void *log_flush_callback_user_info; }; struct liq_image { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); f_pixel *f_pixels; rgba_pixel **rows; double gamma; unsigned int width, height; unsigned char *importance_map, *edges, *dither_map; rgba_pixel *pixels, *temp_row; f_pixel *temp_f_row; liq_image_get_rgba_row_callback *row_callback; void *row_callback_user_info; liq_image *background; float min_opaque_val; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; bool free_pixels, free_rows, free_rows_internal; }; typedef struct liq_remapping_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); unsigned char *pixels; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; double gamma, palette_error; float dither_level; unsigned char use_dither_map; unsigned char progress_stage1; } liq_remapping_result; struct liq_result { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); liq_remapping_result *remapping; colormap *palette; liq_progress_callback_function *progress_callback; void *progress_callback_user_info; liq_palette int_palette; float dither_level; double gamma, palette_error; int min_posterization_output; unsigned char use_dither_map; }; struct liq_histogram { const char *magic_header; void* (*malloc)(size_t); void (*free)(void*); struct acolorhash_table *acht; 
double gamma; f_pixel fixed_colors[256]; unsigned short fixed_colors_count; unsigned short ignorebits; bool had_image_added; }; static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels) LIQ_NONNULL; static void contrast_maps(liq_image *image) LIQ_NONNULL; static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output) LIQ_NONNULL; static const rgba_pixel *liq_image_get_row_rgba(liq_image *input_image, unsigned int row) LIQ_NONNULL; static bool liq_image_get_row_f_init(liq_image *img) LIQ_NONNULL; static const f_pixel *liq_image_get_row_f(liq_image *input_image, unsigned int row) LIQ_NONNULL; static void liq_remapping_result_destroy(liq_remapping_result *result) LIQ_NONNULL; static liq_error pngquant_quantize(histogram *hist, const liq_attr *options, const int fixed_colors_count, const f_pixel fixed_colors[], const double gamma, bool fixed_result_colors, liq_result **) LIQ_NONNULL; static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) LIQ_NONNULL; LIQ_NONNULL static void liq_verbose_printf(const liq_attr *context, const char *fmt, ...) 
// Tail of liq_verbose_printf (signature precedes this chunk): formats a
// printf-style message into a stack buffer sized by a dry-run vsnprintf,
// then delivers it to the user-installed log callback (no-op when unset).
{
    if (context->log_callback) {
        va_list va;
        va_start(va, fmt);
        int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0
        va_end(va);

        LIQ_ARRAY(char, buf, required_space);
        va_start(va, fmt);
        vsnprintf(buf, required_space, fmt, va);
        va_end(va);

        context->log_callback(context, buf, context->log_callback_user_info);
    }
}

// Delivers a pre-formatted message to the attr's log callback, if any.
LIQ_NONNULL inline static void verbose_print(const liq_attr *attr, const char *msg)
{
    if (attr->log_callback) {
        attr->log_callback(attr, msg, attr->log_callback_user_info);
    }
}

// Tells the client that queued log messages should be flushed/displayed now.
LIQ_NONNULL static void liq_verbose_printf_flush(liq_attr *attr)
{
    if (attr->log_flush_callback) {
        attr->log_flush_callback(attr, attr->log_flush_callback_user_info);
    }
}

// Returns true when the user's progress callback exists AND requested an abort
// (the callback returning false/0 means "stop").
LIQ_NONNULL static bool liq_progress(const liq_attr *attr, const float percent)
{
    return attr->progress_callback && !attr->progress_callback(percent, attr->progress_callback_user_info);
}

// Same abort convention as liq_progress(), but for the remapping-phase callback.
LIQ_NONNULL static bool liq_remap_progress(const liq_remapping_result *quant, const float percent)
{
    return quant->progress_callback && !quant->progress_callback(percent, quant->progress_callback_user_info);
}

#if USE_SSE
// Runtime detection of SSE; trivially true on 64-bit x86 targets.
inline static bool is_sse_available()
{
#if (defined(__x86_64__) || defined(__amd64) || defined(_WIN64))
    return true;
#elif _MSC_VER
    int info[4];
    __cpuid(info, 1);
    /* bool is implemented as a built-in type of size 1 in MSVC */
    return info[3] & (1<<26) ? true : false;
#else
    int a,b,c,d;
    cpuid(1, a, b, c, d);
    return d & (1<<25); // edx bit 25 is set when SSE is present
#endif
}
#endif

/* make it clear in backtrace when user-supplied handle points to invalid memory */
NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header);
LIQ_EXPORT bool liq_crash_if_invalid_handle_pointer_given(const liq_attr *user_supplied_pointer, const char *const expected_magic_header)
{
    if (!user_supplied_pointer) {
        return false;
    }

    if (user_supplied_pointer->magic_header == liq_freed_magic) {
        fprintf(stderr, "%s used after being freed", expected_magic_header);
        // this is not normal error handling, this is programmer error that should crash the program.
        // program cannot safely continue if memory has been used after it's been freed.
        // abort() is nasty, but security vulnerability may be worse.
        abort();
    }

    return user_supplied_pointer->magic_header == expected_magic_header;
}

NEVER_INLINE LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer);
LIQ_EXPORT bool liq_crash_if_invalid_pointer_given(const void *pointer)
{
    if (!pointer) {
        return false;
    }
    // Force a read from the given (potentially invalid) memory location in order to check early whether this crashes the program or not.
    // It doesn't matter what value is read, the code here is just to shut the compiler up about unused read.
    char test_access = *((volatile char *)pointer);
    return test_access || true;
}

// Routes an error message to the attr's log callback (after handle validation).
LIQ_NONNULL static void liq_log_error(const liq_attr *attr, const char *msg)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    liq_verbose_printf(attr, " error: %s", msg);
}

// Maps the 0..100 quality knob to an internal mean-square-error budget
// (0 = unlimited error, 100 = zero error).
static double quality_to_mse(long quality)
{
    if (quality == 0) {
        return MAX_DIFF;
    }
    if (quality == 100) {
        return 0;
    }

    // curve fudged to be roughly similar to quality of libjpeg
    // except lowest 10 for really low number of colors
    const double extra_low_quality_fudge = MAX(0,0.016/(0.001+quality) - 0.001);
    return extra_low_quality_fudge + 2.5/pow(210.0 + quality, 1.2) * (100.1-quality)/100.0;
}

// Inverse of quality_to_mse(): highest quality whose MSE budget covers the given error.
static unsigned int mse_to_quality(double mse)
{
    for(int i=100; i > 0; i--) {
        if (mse <= quality_to_mse(i) + 0.000001) { // + epsilon for floating point errors
            return i;
        }
    }
    return 0;
}

/** internally MSE is a sum of all channels with pixels 0..1 range, but other software gives per-RGB-channel MSE for 0..255 range */
static double mse_to_standard_mse(double mse)
{
    return mse * 65536.0/6.0;
}

// Sets the minimum/target quality range (both 0..100, minimum <= target).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_quality(liq_attr* attr, int minimum, int target)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (target < 0 || target > 100 || target < minimum || minimum < 0) return LIQ_VALUE_OUT_OF_RANGE;

    attr->target_mse = quality_to_mse(target);
    attr->max_mse = quality_to_mse(minimum);
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_min_quality(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return mse_to_quality(attr->max_mse);
}

LIQ_EXPORT LIQ_NONNULL int liq_get_max_quality(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return mse_to_quality(attr->target_mse);
}

// Caps the generated palette size; valid range is 2..256 entries.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_max_colors(liq_attr* attr, int colors)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (colors < 2 || colors > 256) return LIQ_VALUE_OUT_OF_RANGE;

    attr->max_colors = colors;
    return LIQ_OK;
}

LIQ_EXPORT
LIQ_NONNULL int liq_get_max_colors(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return attr->max_colors;
}

// Forces output palette entries to be rounded to fewer bits (0..4).
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_posterization(liq_attr *attr, int bits)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (bits < 0 || bits > 4) return LIQ_VALUE_OUT_OF_RANGE;

    attr->min_posterization_output = bits;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_min_posterization(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return attr->min_posterization_output;
}

// Translates the public 1 (slowest/best) .. 10 (fastest) speed knob into all of
// the internal tuning parameters: k-means iteration count/limit, feedback-loop
// trials, histogram size, input posterization, dither/contrast map usage, and
// the relative sizes of the three progress-reporting stages.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_speed(liq_attr* attr, int speed)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (speed < 1 || speed > 10) return LIQ_VALUE_OUT_OF_RANGE;

    unsigned int iterations = MAX(8-speed, 0);
    iterations += iterations * iterations/2;
    attr->kmeans_iterations = iterations;
    attr->kmeans_iteration_limit = 1.0/(double)(1<<(23-speed));
    attr->feedback_loop_trials = MAX(56-9*speed, 0);
    attr->max_histogram_entries = (1<<17) + (1<<18)*(10-speed);
    attr->min_posterization_input = (speed >= 8) ? 1 : 0;
    attr->use_dither_map = (speed <= (omp_get_max_threads() > 1 ? 7 : 5)); // parallelized dither map might speed up floyd remapping
    if (attr->use_dither_map && speed < 3) {
        attr->use_dither_map = 2; // always
    }
    attr->use_contrast_maps = (speed <= 7) || attr->use_dither_map;
    attr->speed = speed;

    // progress stages must add up to 100 (percent)
    attr->progress_stage1 = attr->use_contrast_maps ? 20 : 8;
    if (attr->feedback_loop_trials < 2) {
        attr->progress_stage1 += 30;
    }
    attr->progress_stage3 = 50 / (1+speed);
    attr->progress_stage2 = 100 - attr->progress_stage1 - attr->progress_stage3;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_speed(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return attr->speed;
}

// Overrides output gamma; invalidates any cached remapping made with the old gamma.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_output_gamma(liq_result* res, double gamma)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER;
    if (gamma <= 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;

    if (res->remapping) {
        liq_remapping_result_destroy(res->remapping);
        res->remapping = NULL;
    }

    res->gamma = gamma;
    return LIQ_OK;
}

// Stores minimum opacity as a 0..1 fraction of the given 0..255 byte value.
LIQ_EXPORT LIQ_NONNULL liq_error liq_set_min_opacity(liq_attr* attr, int min)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (min < 0 || min > 255) return LIQ_VALUE_OUT_OF_RANGE;

    attr->min_opaque_val = (double)min/255.0;
    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_min_opacity(const liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return -1;
    return MIN(255.f, 256.f * attr->min_opaque_val);
}

LIQ_EXPORT LIQ_NONNULL void liq_set_last_index_transparent(liq_attr* attr, int is_last)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->last_index_transparent = !!is_last;
}

LIQ_EXPORT void liq_attr_set_progress_callback(liq_attr *attr, liq_progress_callback_function *callback, void *user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->progress_callback = callback;
    attr->progress_callback_user_info = user_info;
}

LIQ_EXPORT void liq_result_set_progress_callback(liq_result *result, liq_progress_callback_function *callback, void *user_info)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return;
    result->progress_callback = callback;
    result->progress_callback_user_info = user_info;
}

// Installs the log callback (body continues in the next chunk).
LIQ_EXPORT void liq_set_log_callback(liq_attr *attr, liq_log_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr))
        return;
    // flush pending messages to the OLD callback before swapping it out
    liq_verbose_printf_flush(attr);
    attr->log_callback = callback;
    attr->log_callback_user_info = user_info;
}

LIQ_EXPORT void liq_set_log_flush_callback(liq_attr *attr, liq_log_flush_callback_function *callback, void* user_info)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return;
    attr->log_flush_callback = callback;
    attr->log_flush_callback_user_info = user_info;
}

LIQ_EXPORT liq_attr* liq_attr_create()
{
    return liq_attr_create_with_allocator(NULL, NULL);
}

LIQ_EXPORT LIQ_NONNULL void liq_attr_destroy(liq_attr *attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return;
    }

    liq_verbose_printf_flush(attr);

    // poison magic so use-after-free is caught by liq_crash_if_invalid_handle_pointer_given
    attr->magic_header = liq_freed_magic;
    attr->free(attr);
}

LIQ_EXPORT LIQ_NONNULL liq_attr* liq_attr_copy(const liq_attr *orig)
{
    if (!CHECK_STRUCT_TYPE(orig, liq_attr)) {
        return NULL;
    }

    liq_attr *attr = orig->malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = *orig;
    return attr;
}

// Default allocator: over-allocates by 16 bytes and aligns the returned pointer
// to 16; the shift amount is stashed (xor-obfuscated) in ptr[-1] for liq_aligned_free.
static void *liq_aligned_malloc(size_t size)
{
    unsigned char *ptr = malloc(size + 16);
    if (!ptr) {
        return NULL;
    }

    uintptr_t offset = 16 - ((uintptr_t)ptr & 15); // also reserves 1 byte for ptr[-1]
    ptr += offset;
    assert(0 == (((uintptr_t)ptr) & 15));
    ptr[-1] = offset ^ 0x59; // store how much pointer was shifted to get the original for free()

    return ptr;
}

LIQ_NONNULL static void liq_aligned_free(void *inptr)
{
    unsigned char *ptr = inptr;
    size_t offset = ptr[-1] ^ 0x59;
    assert(offset > 0 && offset <= 16);
    free(ptr - offset);
}

// Creates a liq_attr with default settings; both allocators must be given
// together or both omitted (then the aligned defaults are used).
LIQ_EXPORT liq_attr* liq_attr_create_with_allocator(void* (*custom_malloc)(size_t), void (*custom_free)(void*))
{
#if USE_SSE
    if (!is_sse_available()) {
        return NULL;
    }
#endif
    if (!custom_malloc && !custom_free) {
        custom_malloc = liq_aligned_malloc;
        custom_free = liq_aligned_free;
    } else if (!custom_malloc != !custom_free) {
        return NULL; // either specify both or none
    }

    liq_attr *attr = custom_malloc(sizeof(liq_attr));
    if (!attr) return NULL;
    *attr = (liq_attr) {
        .magic_header = liq_attr_magic,
        .malloc = custom_malloc,
        .free = custom_free,
        .max_colors = 256,
        .min_opaque_val = 1, // whether preserve opaque colors for IE (1.0=no, does not affect alpha)
        .last_index_transparent = false, // puts transparent color at last index. This is workaround for blu-ray subtitles.
        .target_mse = 0,
        .max_mse = MAX_DIFF,
    };

    liq_set_speed(attr, 4);
    return attr;
}

// Appends a color that the quantizer must keep verbatim in the palette (max 256).
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_add_fixed_color(liq_image *img, liq_color color)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (img->fixed_colors_count > 255) return LIQ_UNSUPPORTED;

    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, img->gamma);
    img->fixed_colors[img->fixed_colors_count++] = rgba_to_f(gamma_lut, (rgba_pixel){
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    });
    return LIQ_OK;
}

LIQ_NONNULL static liq_error liq_histogram_add_fixed_color_f(liq_histogram *hist, f_pixel color)
{
    if (hist->fixed_colors_count > 255) return LIQ_UNSUPPORTED;

    hist->fixed_colors[hist->fixed_colors_count++] = color;
    return LIQ_OK;
}

// Public wrapper: converts the sRGB color through the gamma LUT, then delegates.
LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_fixed_color(liq_histogram *hist, liq_color color, double gamma)
{
    if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return LIQ_INVALID_POINTER;

    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma ? gamma : 0.45455);
    const f_pixel px = rgba_to_f(gamma_lut, (rgba_pixel){
        .r = color.r,
        .g = color.g,
        .b = color.b,
        .a = color.a,
    });
    return liq_histogram_add_fixed_color_f(hist, px);
}

// Low-memory mode: allocate one temp f_pixel row per OpenMP thread instead of
// caching the whole converted image.
LIQ_NONNULL static bool liq_image_use_low_memory(liq_image *img)
{
    img->temp_f_row = img->malloc(sizeof(img->f_pixels[0]) * LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_max_threads());
    return img->temp_f_row != NULL;
}

// Heuristic threshold for switching to low-memory mode (continues in next chunk).
LIQ_NONNULL static bool liq_image_should_use_low_memory(liq_image *img, const bool low_memory_hint)
{
    return img->width * img->height > (low_memory_hint ?
        LIQ_HIGH_MEMORY_LIMIT/8 : LIQ_HIGH_MEMORY_LIMIT) / sizeof(f_pixel); // Watch out for integer overflow
}

// Common constructor behind all liq_image_create_* entry points. Either rows
// or row_callback must be supplied; gamma 0 means the 0.45455 default.
static liq_image *liq_image_create_internal(const liq_attr *attr, rgba_pixel* rows[], liq_image_get_rgba_row_callback *row_callback, void *row_callback_user_info, int width, int height, double gamma)
{
    if (gamma < 0 || gamma > 1.0) {
        liq_log_error(attr, "gamma must be >= 0 and <= 1 (try 1/gamma instead)");
        return NULL;
    }
    if (!rows && !row_callback) {
        liq_log_error(attr, "missing row data");
        return NULL;
    }

    liq_image *img = attr->malloc(sizeof(liq_image));
    if (!img) return NULL;
    *img = (liq_image){
        .magic_header = liq_image_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .width = width,
        .height = height,
        .gamma = gamma ? gamma : 0.45455,
        .rows = rows,
        .row_callback = row_callback,
        .row_callback_user_info = row_callback_user_info,
        .min_opaque_val = attr->min_opaque_val,
    };

    if (!rows || attr->min_opaque_val < 1.f) {
        img->temp_row = attr->malloc(sizeof(img->temp_row[0]) * LIQ_TEMP_ROW_WIDTH(width) * omp_get_max_threads());
        // NOTE(review): img itself is not freed on this failure path — confirm intended
        if (!img->temp_row) return NULL;
    }

    // if image is huge or converted pixels are not likely to be reused then don't cache converted pixels
    if (liq_image_should_use_low_memory(img, !img->temp_row && !attr->use_contrast_maps && !attr->use_dither_map)) {
        verbose_print(attr, " conserving memory");
        if (!liq_image_use_low_memory(img)) return NULL;
    }

    if (img->min_opaque_val < 1.f) {
        verbose_print(attr, " Working around IE6 bug by making image less transparent...");
    }

    return img;
}

// Transfers ownership of rows and/or the pixel buffer to the library, so they
// are freed together with the image.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_memory_ownership(liq_image *img, int ownership_flags)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!img->rows || !ownership_flags || (ownership_flags & ~(LIQ_OWN_ROWS|LIQ_OWN_PIXELS))) {
        return LIQ_VALUE_OUT_OF_RANGE;
    }

    if (ownership_flags & LIQ_OWN_ROWS) {
        if (img->free_rows_internal) return LIQ_VALUE_OUT_OF_RANGE;
        img->free_rows = true;
    }

    if (ownership_flags & LIQ_OWN_PIXELS) {
        img->free_pixels = true;
        if (!img->pixels) {
            // for simplicity of this API there's no explicit bitmap argument,
            // so the row with the lowest address is assumed to be at the start of the bitmap
            img->pixels = img->rows[0];
            for(unsigned int i=1; i < img->height; i++) {
                img->pixels = MIN(img->pixels, img->rows[i]);
            }
        }
    }

    return LIQ_OK;
}

LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image);
LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image);

// Attaches a per-pixel importance map (one byte per pixel); with LIQ_COPY_PIXELS
// the buffer is duplicated, with LIQ_OWN_PIXELS ownership transfers to the image.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_importance_map(liq_image *img, unsigned char importance_map[], size_t buffer_size, enum liq_ownership ownership)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(importance_map)) return LIQ_INVALID_POINTER;
    const size_t required_size = img->width * img->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    if (ownership == LIQ_COPY_PIXELS) {
        unsigned char *tmp = img->malloc(required_size);
        if (!tmp) {
            return LIQ_OUT_OF_MEMORY;
        }
        memcpy(tmp, importance_map, required_size);
        importance_map = tmp;
    } else if (ownership != LIQ_OWN_PIXELS) {
        return LIQ_UNSUPPORTED;
    }

    liq_image_free_importance_map(img);
    img->importance_map = importance_map;

    return LIQ_OK;
}

// Sets an opaque background the image will be composited over; takes ownership
// of `background` (destroyed with, or replaced on, the owning image).
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_set_background(liq_image *img, liq_image *background)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(background, liq_image)) return LIQ_INVALID_POINTER;

    if (background->background) {
        return LIQ_UNSUPPORTED; // backgrounds can't nest
    }
    if (img->width != background->width || img->height != background->height) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    if (img->background) {
        liq_image_destroy(img->background);
    }

    img->background = background;
    liq_image_free_maps(img); // Force them to be re-analyzed with the background

    return LIQ_OK;
}

// Validates dimensions, including overflow of the internal size computations.
LIQ_NONNULL static bool check_image_size(const liq_attr *attr, const int width, const int height)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return false;
    }

    if (width <= 0 || height <= 0) {
        liq_log_error(attr, "width and height must be > 0");
        return false;
    }

    if (width > INT_MAX/sizeof(rgba_pixel)/height || width > INT_MAX/16/sizeof(f_pixel) || height > INT_MAX/sizeof(size_t)) {
        liq_log_error(attr, "image too large");
        return false;
    }
    return true;
}

// Image fed row-by-row through a user callback instead of a pixel buffer.
LIQ_EXPORT liq_image *liq_image_create_custom(const liq_attr *attr, liq_image_get_rgba_row_callback *row_callback, void* user_info, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }
    return liq_image_create_internal(attr, NULL, row_callback, user_info, width, height, gamma);
}

// Image built from an array of user-owned row pointers.
LIQ_EXPORT liq_image *liq_image_create_rgba_rows(const liq_attr *attr, void *const rows[], int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }

    for(int i=0; i < height; i++) {
        if (!CHECK_USER_POINTER(rows+i) || !CHECK_USER_POINTER(rows[i])) {
            liq_log_error(attr, "invalid row pointers");
            return NULL;
        }
    }
    return liq_image_create_internal(attr, (rgba_pixel**)rows, NULL, NULL, width, height, gamma);
}

// Image from one contiguous RGBA bitmap; internally builds a row-pointer table.
LIQ_EXPORT LIQ_NONNULL liq_image *liq_image_create_rgba(const liq_attr *attr, const void* bitmap, int width, int height, double gamma)
{
    if (!check_image_size(attr, width, height)) {
        return NULL;
    }

    if (!CHECK_USER_POINTER(bitmap)) {
        liq_log_error(attr, "invalid bitmap pointer");
        return NULL;
    }

    rgba_pixel *const pixels = (rgba_pixel *const)bitmap;
    rgba_pixel **rows = attr->malloc(sizeof(rows[0])*height);
    if (!rows) return NULL;

    for(int i=0; i < height; i++) {
        rows[i] = pixels + width * i;
    }

    liq_image *image = liq_image_create_internal(attr, rows, NULL, NULL, width, height, gamma);
    if (!image) {
        attr->free(rows);
        return NULL;
    }
    image->free_rows = true;
    image->free_rows_internal = true;
    return image;
}

// Exported (not inlined) so that crashes inside the user's callback are
// clearly attributable in backtraces.
NEVER_INLINE LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback *callback, liq_color *temp_row, int row, int width, void *user_info);
LIQ_EXPORT void liq_executing_user_callback(liq_image_get_rgba_row_callback
        *callback, liq_color *temp_row, int row, int width, void *user_info)
{
    assert(callback);
    assert(temp_row);
    callback(temp_row, row, width, user_info);
}

LIQ_NONNULL inline static bool liq_image_has_rgba_pixels(const liq_image *img)
{
    if (!CHECK_STRUCT_TYPE(img, liq_image)) {
        return false;
    }
    return img->rows || (img->temp_row && img->row_callback);
}

// Direct row access is possible only when rows exist and no alpha rewrite
// (IE6 workaround) is needed.
LIQ_NONNULL inline static bool liq_image_can_use_rgba_rows(const liq_image *img)
{
    assert(liq_image_has_rgba_pixels(img));
    const bool iebug = img->min_opaque_val < 1.f;
    return (img->rows && !iebug);
}

// Returns a row of RGBA pixels, either directly from user memory or staged
// into this thread's slice of the temp row buffer.
LIQ_NONNULL static const rgba_pixel *liq_image_get_row_rgba(liq_image *img, unsigned int row)
{
    if (liq_image_can_use_rgba_rows(img)) {
        return img->rows[row];
    }

    assert(img->temp_row);
    rgba_pixel *temp_row = img->temp_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
    if (img->rows) {
        memcpy(temp_row, img->rows[row], img->width * sizeof(temp_row[0]));
    } else {
        liq_executing_user_callback(img->row_callback, (liq_color*)temp_row, row, img->width, img->row_callback_user_info);
    }

    if (img->min_opaque_val < 1.f) modify_alpha(img, temp_row);
    return temp_row;
}

// Converts one RGBA row into gamma-corrected floating-point pixels.
LIQ_NONNULL static void convert_row_to_f(liq_image *img, f_pixel *row_f_pixels, const unsigned int row, const float gamma_lut[])
{
    assert(row_f_pixels);
    assert(!USE_SSE || 0 == ((uintptr_t)row_f_pixels & 15));

    const rgba_pixel *const row_pixels = liq_image_get_row_rgba(img, row);

    for(unsigned int col=0; col < img->width; col++) {
        row_f_pixels[col] = rgba_to_f(gamma_lut, row_pixels[col]);
    }
}

// One-time preparation of f_pixel data: whole-image cache when memory allows,
// otherwise falls back to per-thread temp rows. Must run on the main thread.
LIQ_NONNULL static bool liq_image_get_row_f_init(liq_image *img)
{
    assert(omp_get_thread_num() == 0);
    if (img->f_pixels) {
        return true;
    }
    if (!liq_image_should_use_low_memory(img, false)) {
        img->f_pixels = img->malloc(sizeof(img->f_pixels[0]) * img->width * img->height);
    }
    if (!img->f_pixels) {
        return liq_image_use_low_memory(img);
    }

    if (!liq_image_has_rgba_pixels(img)) {
        return false;
    }

    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, img->gamma);
    for(unsigned int i=0; i < img->height; i++) {
        convert_row_to_f(img, &img->f_pixels[i*img->width], i, gamma_lut);
    }
    return true;
}

// Returns a row of f_pixels: cached if available, else converted on the fly
// into this thread's temp row.
LIQ_NONNULL static const f_pixel *liq_image_get_row_f(liq_image *img, unsigned int row)
{
    if (!img->f_pixels) {
        assert(img->temp_f_row); // init should have done that
        float gamma_lut[256];
        to_f_set_gamma(gamma_lut, img->gamma);
        f_pixel *row_for_thread = img->temp_f_row + LIQ_TEMP_ROW_WIDTH(img->width) * omp_get_thread_num();
        convert_row_to_f(img, row_for_thread, row, gamma_lut);
        return row_for_thread;
    }
    return img->f_pixels + img->width * row;
}

LIQ_EXPORT LIQ_NONNULL int liq_image_get_width(const liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
    return input_image->width;
}

LIQ_EXPORT LIQ_NONNULL int liq_image_get_height(const liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return -1;
    return input_image->height;
}

typedef void free_func(void*);

LIQ_NONNULL static free_func *get_default_free_func(liq_image *img)
{
    // When default allocator is used then user-supplied pointers must be freed with free()
    if (img->free_rows_internal || img->free != liq_aligned_free) {
        return img->free;
    }
    return free;
}

// Frees pixel/row buffers the library owns (per liq_image_set_memory_ownership).
LIQ_NONNULL static void liq_image_free_rgba_source(liq_image *input_image)
{
    if (input_image->free_pixels && input_image->pixels) {
        get_default_free_func(input_image)(input_image->pixels);
        input_image->pixels = NULL;
    }

    if (input_image->free_rows && input_image->rows) {
        get_default_free_func(input_image)(input_image->rows);
        input_image->rows = NULL;
    }
}

LIQ_NONNULL static void liq_image_free_importance_map(liq_image *input_image)
{
    if (input_image->importance_map) {
        input_image->free(input_image->importance_map);
        input_image->importance_map = NULL;
    }
}

// Frees importance, edge and dither maps (the derived per-pixel analyses).
LIQ_NONNULL static void liq_image_free_maps(liq_image *input_image)
{
    liq_image_free_importance_map(input_image);

    if (input_image->edges) {
        input_image->free(input_image->edges);
        input_image->edges = NULL;
    }

    if (input_image->dither_map) {
        input_image->free(input_image->dither_map);
        input_image->dither_map = NULL;
    }
}

// Full teardown; recursively destroys the attached background image too.
LIQ_EXPORT LIQ_NONNULL void liq_image_destroy(liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return;

    liq_image_free_rgba_source(input_image);
    liq_image_free_maps(input_image);

    if (input_image->f_pixels) {
        input_image->free(input_image->f_pixels);
    }
    if (input_image->temp_row) {
        input_image->free(input_image->temp_row);
    }
    if (input_image->temp_f_row) {
        input_image->free(input_image->temp_f_row);
    }
    if (input_image->background) {
        liq_image_destroy(input_image->background);
    }

    input_image->magic_header = liq_freed_magic;
    input_image->free(input_image);
}

LIQ_EXPORT liq_histogram* liq_histogram_create(const liq_attr* attr)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) {
        return NULL;
    }

    liq_histogram *hist = attr->malloc(sizeof(liq_histogram));
    if (!hist) return NULL;
    *hist = (liq_histogram) {
        .magic_header = liq_histogram_magic,
        .malloc = attr->malloc,
        .free = attr->free,
        .ignorebits = MAX(attr->min_posterization_output, attr->min_posterization_input),
    };
    return hist;
}

LIQ_EXPORT LIQ_NONNULL void liq_histogram_destroy(liq_histogram *hist)
{
    if (!CHECK_STRUCT_TYPE(hist, liq_histogram)) return;
    hist->magic_header = liq_freed_magic;

    pam_freeacolorhash(hist->acht);
    hist->free(hist);
}

// Legacy single-call API; NULL on any failure (detailed errors via liq_image_quantize).
LIQ_EXPORT LIQ_NONNULL liq_result *liq_quantize_image(liq_attr *attr, liq_image *img)
{
    liq_result *res;
    if (LIQ_OK != liq_image_quantize(img, attr, &res)) {
        return NULL;
    }
    return res;
}

// Convenience wrapper: builds a one-image histogram and quantizes it.
// NOTE(review): `hist` is not destroyed on the liq_histogram_add_image error path
// below — confirm intended.
LIQ_EXPORT LIQ_NONNULL liq_error liq_image_quantize(liq_image *const img, liq_attr *const attr, liq_result **result_output)
{
    if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER;
    if (!liq_image_has_rgba_pixels(img)) {
        return LIQ_UNSUPPORTED;
    }

    liq_histogram *hist = liq_histogram_create(attr);
    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_error err = liq_histogram_add_image(hist, attr, img);
    if (LIQ_OK != err) {
        return err;
    }

    err = liq_histogram_quantize_internal(hist, attr, false, result_output);
liq_histogram_destroy(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_quantize(liq_histogram *input_hist, liq_attr *attr, liq_result **result_output) { return liq_histogram_quantize_internal(input_hist, attr, true, result_output); } LIQ_NONNULL static liq_error liq_histogram_quantize_internal(liq_histogram *input_hist, liq_attr *attr, bool fixed_result_colors, liq_result **result_output) { if (!CHECK_USER_POINTER(result_output)) return LIQ_INVALID_POINTER; *result_output = NULL; if (!CHECK_STRUCT_TYPE(attr, liq_attr)) return LIQ_INVALID_POINTER; if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER; if (liq_progress(attr, 0)) return LIQ_ABORTED; histogram *hist; liq_error err = finalize_histogram(input_hist, attr, &hist); if (err != LIQ_OK) { return err; } err = pngquant_quantize(hist, attr, input_hist->fixed_colors_count, input_hist->fixed_colors, input_hist->gamma, fixed_result_colors, result_output); pam_freeacolorhist(hist); return err; } LIQ_EXPORT LIQ_NONNULL liq_error liq_set_dithering_level(liq_result *res, float dither_level) { if (!CHECK_STRUCT_TYPE(res, liq_result)) return LIQ_INVALID_POINTER; if (res->remapping) { liq_remapping_result_destroy(res->remapping); res->remapping = NULL; } if (res->dither_level < 0 || res->dither_level > 1.0f) return LIQ_VALUE_OUT_OF_RANGE; res->dither_level = dither_level; return LIQ_OK; } LIQ_NONNULL static liq_remapping_result *liq_remapping_result_create(liq_result *result) { if (!CHECK_STRUCT_TYPE(result, liq_result)) { return NULL; } liq_remapping_result *res = result->malloc(sizeof(liq_remapping_result)); if (!res) return NULL; *res = (liq_remapping_result) { .magic_header = liq_remapping_result_magic, .malloc = result->malloc, .free = result->free, .dither_level = result->dither_level, .use_dither_map = result->use_dither_map, .palette_error = result->palette_error, .gamma = result->gamma, .palette = pam_duplicate_colormap(result->palette), .progress_callback = 
            result->progress_callback,
        .progress_callback_user_info = result->progress_callback_user_info,
        .progress_stage1 = result->use_dither_map ? 20 : 0,
    };
    return res;
}

LIQ_EXPORT LIQ_NONNULL double liq_get_output_gamma(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    return result->gamma;
}

LIQ_NONNULL static void liq_remapping_result_destroy(liq_remapping_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_remapping_result)) return;

    if (result->palette) pam_freecolormap(result->palette);
    if (result->pixels) result->free(result->pixels);

    result->magic_header = liq_freed_magic;
    result->free(result);
}

LIQ_EXPORT LIQ_NONNULL void liq_result_destroy(liq_result *res)
{
    if (!CHECK_STRUCT_TYPE(res, liq_result)) return;

    memset(&res->int_palette, 0, sizeof(liq_palette));

    if (res->remapping) {
        memset(&res->remapping->int_palette, 0, sizeof(liq_palette));
        liq_remapping_result_destroy(res->remapping);
    }

    pam_freecolormap(res->palette);

    res->magic_header = liq_freed_magic;
    res->free(res);
}

// Palette error in "standard" per-channel 0..255 MSE units, or -1 when unknown.
LIQ_EXPORT LIQ_NONNULL double liq_get_quantization_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->palette_error >= 0) {
        return mse_to_standard_mse(result->palette_error);
    }
    return -1;
}

LIQ_EXPORT LIQ_NONNULL double liq_get_remapping_error(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->remapping && result->remapping->palette_error >= 0) {
        return mse_to_standard_mse(result->remapping->palette_error);
    }
    return -1;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_quantization_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->palette_error >= 0) {
        return mse_to_quality(result->palette_error);
    }
    return -1;
}

LIQ_EXPORT LIQ_NONNULL int liq_get_remapping_quality(const liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return -1;

    if (result->remapping && result->remapping->palette_error >= 0) {
        return mse_to_quality(result->remapping->palette_error);
    }
    return -1;
}

// qsort comparator: descending popularity.
LIQ_NONNULL static int compare_popularity(const void *ch1, const void *ch2)
{
    const float v1 = ((const colormap_item*)ch1)->popularity;
    const float v2 = ((const colormap_item*)ch2)->popularity;
    return v1 > v2 ? -1 : 1;
}

LIQ_NONNULL static void sort_palette_qsort(colormap *map, int start, int nelem)
{
    if (!nelem) return;
    qsort(map->palette + start, nelem, sizeof(map->palette[0]), compare_popularity);
}

#define SWAP_PALETTE(map, a,b) { \
    const colormap_item tmp = (map)->palette[(a)]; \
    (map)->palette[(a)] = (map)->palette[(b)]; \
    (map)->palette[(b)] = tmp; }

// Orders palette entries for PNG output: transparent entries first (or last,
// if requested) to shrink the tRNS chunk, sorted by popularity within groups.
LIQ_NONNULL static void sort_palette(colormap *map, const liq_attr *options)
{
    /*
    ** Step 3.5 [GRR]: remap the palette colors so that all entries with
    ** the maximal alpha value (i.e., fully opaque) are at the end and can
    ** therefore be omitted from the tRNS chunk.
    */
    if (options->last_index_transparent) {
        for(unsigned int i=0; i < map->colors; i++) {
            if (map->palette[i].acolor.a < 1.f/256.f) {
                const unsigned int old = i, transparent_dest = map->colors-1;

                SWAP_PALETTE(map, transparent_dest, old);

                /* colors sorted by popularity make pngs slightly more compressible */
                sort_palette_qsort(map, 0, map->colors-1);
                return;
            }
        }
    }

    unsigned int non_fixed_colors = 0;
    for(unsigned int i = 0; i < map->colors; i++) {
        if (map->palette[i].fixed) {
            break;
        }
        non_fixed_colors++;
    }

    /* move transparent colors to the beginning to shrink trns chunk */
    unsigned int num_transparent = 0;
    for(unsigned int i = 0; i < non_fixed_colors; i++) {
        if (map->palette[i].acolor.a < 255.f/256.f) {
            // current transparent color is swapped with earlier opaque one
            if (i != num_transparent) {
                SWAP_PALETTE(map, num_transparent, i);
                i--; // re-examine the entry just swapped into position i
            }
            num_transparent++;
        }
    }

    liq_verbose_printf(options, " eliminated opaque tRNS-chunk entries...%d entr%s transparent", num_transparent, (num_transparent == 1)? "y" : "ies");

    /* colors sorted by popularity make pngs slightly more compressible
     * opaque and transparent are sorted separately
     */
    sort_palette_qsort(map, 0, num_transparent);
    sort_palette_qsort(map, num_transparent, non_fixed_colors - num_transparent);

    if (non_fixed_colors > 9 && map->colors > 16) {
        SWAP_PALETTE(map, 7, 1); // slightly improves compression
        SWAP_PALETTE(map, 8, 2);
        SWAP_PALETTE(map, 9, 3);
    }
}

// Drops the low (8-bits) bits of a channel, repeating the high bits downward.
inline static unsigned int posterize_channel(unsigned int color, unsigned int bits)
{
    return (color & ~((1<<bits)-1)) | (color >> (8-bits));
}

// Converts the float palette to 8-bit entries (with optional posterization) and
// writes the rounded values back so remapping sees exactly the output colors.
LIQ_NONNULL static void set_rounded_palette(liq_palette *const dest, colormap *const map, const double gamma, unsigned int posterize)
{
    float gamma_lut[256];
    to_f_set_gamma(gamma_lut, gamma);

    dest->count = map->colors;
    for(unsigned int x = 0; x < map->colors; ++x) {
        rgba_pixel px = f_to_rgb(gamma, map->palette[x].acolor);

        px.r = posterize_channel(px.r, posterize);
        px.g = posterize_channel(px.g, posterize);
        px.b = posterize_channel(px.b, posterize);
        px.a = posterize_channel(px.a, posterize);

        map->palette[x].acolor = rgba_to_f(gamma_lut, px); /* saves rounding error introduced by to_rgb, which makes remapping & dithering more accurate */

        if (!px.a && !map->palette[x].fixed) {
            // arbitrary RGB for fully-transparent, non-fixed entries
            px.r = 71; px.g = 112; px.b = 76;
        }

        dest->entries[x] = (liq_color){.r=px.r,.g=px.g,.b=px.b,.a=px.a};
    }
}

// Lazily builds and returns the final 8-bit palette for this result.
LIQ_EXPORT LIQ_NONNULL const liq_palette *liq_get_palette(liq_result *result)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) return NULL;

    if (result->remapping && result->remapping->int_palette.count) {
        return &result->remapping->int_palette;
    }

    if (!result->int_palette.count) {
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, result->min_posterization_output);
    }
    return &result->int_palette;
}

// Non-dithered remap of the image to the palette; returns the average
// per-pixel error (body continues in the next chunk).
LIQ_NONNULL static float remap_to_palette(liq_image *const input_image, unsigned char *const *const output_pixels, colormap *const map)
{
    const int rows = input_image->height;
    const unsigned int cols = input_image->width;
    double
remapping_error=0; if (!liq_image_get_row_f_init(input_image)) { return -1; } if (input_image->background && !liq_image_get_row_f_init(input_image->background)) { return -1; } const colormap_item *acolormap = map->palette; struct nearest_map *const n = nearest_init(map); const int transparent_index = input_image->background ? nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0; const unsigned int max_threads = omp_get_max_threads(); LIQ_ARRAY(kmeans_state, average_color, (KMEANS_CACHE_LINE_GAP+map->colors) * max_threads); kmeans_init(map, max_threads, average_color); #if __GNUC__ >= 9 #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(acolormap,average_color,cols,input_image,map,n,output_pixels,rows,transparent_index) reduction(+:remapping_error) #else #pragma omp parallel for if (rows*cols > 3000) \ schedule(static) default(none) shared(acolormap) shared(average_color) reduction(+:remapping_error) #endif for(int row = 0; row < rows; ++row) { const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row); const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? 
liq_image_get_row_f(input_image->background, row) : NULL;
    unsigned int last_match = 0;
    for(unsigned int col = 0; col < cols; ++col) {
        float diff;
        // Seed the nearest-color search with the previous match; neighbours are usually similar.
        last_match = nearest_search(n, &row_pixels[col], last_match, &diff);
        // Prefer the transparent index when the background pixel matches at least as well.
        if (bg_pixels && colordifference(bg_pixels[col], acolormap[last_match].acolor) <= diff) {
            last_match = transparent_index;
        }
        output_pixels[row][col] = last_match;

        remapping_error += diff;
        // Feed the chosen color into this thread's k-means accumulator.
        kmeans_update_color(row_pixels[col], 1.0, map, last_match, omp_get_thread_num(), average_color);
    }
}

kmeans_finalize(map, max_threads, average_color);

nearest_free(n);

// Mean error per pixel.
return remapping_error / (input_image->width * input_image->height);
}

inline static f_pixel get_dithered_pixel(const float dither_level, const float max_dither_error, const f_pixel thiserr, const f_pixel px)
{
    /* Use Floyd-Steinberg errors to adjust actual color. */
    const float sr = thiserr.r * dither_level,
                sg = thiserr.g * dither_level,
                sb = thiserr.b * dither_level,
                sa = thiserr.a * dither_level;

    // ratio scales the applied error down when the adjusted color would leave [max_underflow, max_overflow].
    float ratio = 1.0;
    const float max_overflow = 1.1f;
    const float max_underflow = -0.1f;

    // allowing some overflow prevents undithered bands caused by clamping of all channels
    if (px.r + sr > max_overflow) ratio = MIN(ratio, (max_overflow -px.r)/sr);
    else { if (px.r + sr < max_underflow) ratio = MIN(ratio, (max_underflow-px.r)/sr); }
    if (px.g + sg > max_overflow) ratio = MIN(ratio, (max_overflow -px.g)/sg);
    else { if (px.g + sg < max_underflow) ratio = MIN(ratio, (max_underflow-px.g)/sg); }
    if (px.b + sb > max_overflow) ratio = MIN(ratio, (max_overflow -px.b)/sb);
    else { if (px.b + sb < max_underflow) ratio = MIN(ratio, (max_underflow-px.b)/sb); }

    // Alpha is simply clamped to [0, 1] rather than scaled.
    float a = px.a + sa;
    if (a > 1.f) { a = 1.f; }
    else if (a < 0) { a = 0; }

    // If dithering error is crazy high, don't propagate it that much.
    // This prevents crazy green pixels popping out of the blue (or red or black! ;)
    const float dither_error = sr*sr + sg*sg + sb*sb + sa*sa;
    if (dither_error > max_dither_error) {
        ratio *= 0.8f;
    } else if (dither_error < 2.f/256.f/256.f) {
        // don't dither areas that don't have noticeable error — makes file smaller
        return px;
    }

    return (f_pixel) {
        .r=px.r + sr * ratio,
        .g=px.g + sg * ratio,
        .b=px.b + sb * ratio,
        .a=a,
    };
}

/**
 Uses edge/noise map to apply dithering only to flat areas. Dithering on edges creates jagged lines, and noisy areas are "naturally" dithered.

 If output_image_is_remapped is true, only pixels noticeably changed by error diffusion will be written to output image.
 */
LIQ_NONNULL static bool remap_to_palette_floyd(liq_image *input_image, unsigned char *const output_pixels[], liq_remapping_result *quant, const float max_dither_error, const bool output_image_is_remapped)
{
    const int rows = input_image->height, cols = input_image->width;
    // Optional per-pixel dither strength; prefers the refined dither_map over the raw edge map.
    const unsigned char *dither_map = quant->use_dither_map ? (input_image->dither_map ? input_image->dither_map : input_image->edges) : NULL;

    const colormap *map = quant->palette;
    const colormap_item *acolormap = map->palette;

    if (!liq_image_get_row_f_init(input_image)) {
        return false;
    }
    if (input_image->background && !liq_image_get_row_f_init(input_image->background)) {
        return false;
    }

    /* Initialize Floyd-Steinberg error vectors. */
    // One allocation holds both the current-row and next-row error vectors.
    const size_t errwidth = cols+2;
    f_pixel *restrict thiserr = input_image->malloc(errwidth * sizeof(thiserr[0]) * 2); // +2 saves from checking out of bounds access
    if (!thiserr) return false;
    f_pixel *restrict nexterr = thiserr + errwidth;
    memset(thiserr, 0, errwidth * sizeof(thiserr[0]));

    bool ok = true;
    struct nearest_map *const n = nearest_init(map);
    // When a background is set, locate the palette entry nearest to transparent black.
    const int transparent_index = input_image->background ?
nearest_search(n, &(f_pixel){0,0,0,0}, 0, NULL) : 0;

    // response to this value is non-linear and without it any value < 0.8 would give almost no dithering
    float base_dithering_level = quant->dither_level;
    base_dithering_level = 1.f - (1.f-base_dithering_level)*(1.f-base_dithering_level);

    if (dither_map) {
        base_dithering_level *= 1.f/255.f; // convert byte to float
    }
    base_dithering_level *= 15.f/16.f; // prevent small errors from accumulating

    int fs_direction = 1;
    unsigned int last_match = 0;
    for (int row = 0; row < rows; ++row) {
        if (liq_remap_progress(quant, quant->progress_stage1 + row * (100.f - quant->progress_stage1) / rows)) {
            ok = false;
            break;
        }

        memset(nexterr, 0, errwidth * sizeof(nexterr[0]));

        // Serpentine scan: alternate rows are traversed left-to-right and right-to-left.
        int col = (fs_direction > 0) ? 0 : (cols - 1);
        const f_pixel *const row_pixels = liq_image_get_row_f(input_image, row);
        const f_pixel *const bg_pixels = input_image->background && acolormap[transparent_index].acolor.a < 1.f/256.f ? liq_image_get_row_f(input_image->background, row) : NULL;

        do {
            float dither_level = base_dithering_level;
            if (dither_map) {
                dither_level *= dither_map[row*cols + col];
            }

            // Pixel adjusted by accumulated error (thiserr is offset by 1 for the border columns).
            const f_pixel spx = get_dithered_pixel(dither_level, max_dither_error, thiserr[col + 1], row_pixels[col]);

            const unsigned int guessed_match = output_image_is_remapped ? output_pixels[row][col] : last_match;
            float diff;
            last_match = nearest_search(n, &spx, guessed_match, &diff);
            f_pixel output_px = acolormap[last_match].acolor;
            // If the background pixel matches at least as well, emit the transparent index instead.
            if (bg_pixels && colordifference(bg_pixels[col], output_px) <= diff) {
                output_px = bg_pixels[col];
                output_pixels[row][col] = transparent_index;
            } else {
                output_pixels[row][col] = last_match;
            }

            f_pixel err = {
                .r = (spx.r - output_px.r),
                .g = (spx.g - output_px.g),
                .b = (spx.b - output_px.b),
                .a = (spx.a - output_px.a),
            };

            // If dithering error is crazy high, don't propagate it that much.
            // This prevents crazy green pixels popping out of the blue (or red or black! ;)
            if (err.r*err.r + err.g*err.g + err.b*err.b + err.a*err.a > max_dither_error) {
                err.r *= 0.75f;
                err.g *= 0.75f;
                err.b *= 0.75f;
                err.a *= 0.75f;
            }

            /* Propagate Floyd-Steinberg error terms. */
            // 7/16 to the next pixel in scan order; 3/16, 5/16, 1/16 to the row below.
            if (fs_direction > 0) {
                thiserr[col + 2].a += err.a * (7.f/16.f);
                thiserr[col + 2].r += err.r * (7.f/16.f);
                thiserr[col + 2].g += err.g * (7.f/16.f);
                thiserr[col + 2].b += err.b * (7.f/16.f);
                nexterr[col + 2].a = err.a * (1.f/16.f);
                nexterr[col + 2].r = err.r * (1.f/16.f);
                nexterr[col + 2].g = err.g * (1.f/16.f);
                nexterr[col + 2].b = err.b * (1.f/16.f);
                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);
                nexterr[col    ].a += err.a * (3.f/16.f);
                nexterr[col    ].r += err.r * (3.f/16.f);
                nexterr[col    ].g += err.g * (3.f/16.f);
                nexterr[col    ].b += err.b * (3.f/16.f);
            } else {
                thiserr[col    ].a += err.a * (7.f/16.f);
                thiserr[col    ].r += err.r * (7.f/16.f);
                thiserr[col    ].g += err.g * (7.f/16.f);
                thiserr[col    ].b += err.b * (7.f/16.f);
                nexterr[col    ].a = err.a * (1.f/16.f);
                nexterr[col    ].r = err.r * (1.f/16.f);
                nexterr[col    ].g = err.g * (1.f/16.f);
                nexterr[col    ].b = err.b * (1.f/16.f);
                nexterr[col + 1].a += err.a * (5.f/16.f);
                nexterr[col + 1].r += err.r * (5.f/16.f);
                nexterr[col + 1].g += err.g * (5.f/16.f);
                nexterr[col + 1].b += err.b * (5.f/16.f);
                nexterr[col + 2].a += err.a * (3.f/16.f);
                nexterr[col + 2].r += err.r * (3.f/16.f);
                nexterr[col + 2].g += err.g * (3.f/16.f);
                nexterr[col + 2].b += err.b * (3.f/16.f);
            }

            // remapping is done in zig-zag
            col += fs_direction;
            if (fs_direction > 0) {
                if (col >= cols) break;
            } else {
                if (col < 0) break;
            }
        } while(1);

        // Swap error rows and reverse scan direction for the next scanline.
        f_pixel *const temperr = thiserr;
        thiserr = nexterr;
        nexterr = temperr;
        fs_direction = -fs_direction;
    }

    input_image->free(MIN(thiserr, nexterr)); // MIN because pointers were swapped
    nearest_free(n);

    return ok;
}

/* fixed colors are always included in the palette, so it would be wasteful to duplicate them in palette from
histogram */
LIQ_NONNULL static void remove_fixed_colors_from_histogram(histogram *hist, const int fixed_colors_count, const f_pixel fixed_colors[], const float target_mse)
{
    // Histogram entries closer than this to any fixed color are treated as duplicates.
    const float max_difference = MAX(target_mse/2.f, 2.f/256.f/256.f);
    if (fixed_colors_count) {
        for(int j=0; j < hist->size; j++) {
            for(unsigned int i=0; i < fixed_colors_count; i++) {
                if (colordifference(hist->achv[j].acolor, fixed_colors[i]) < max_difference) {
                    hist->achv[j] = hist->achv[--hist->size]; // remove color from histogram by overwriting with the last entry
                    j--; // re-examine slot j, which now holds the moved entry
                    break; // continue searching histogram
                }
            }
        }
    }
}

LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_colors(liq_histogram *input_hist, const liq_attr *options, const liq_histogram_entry entries[], int num_entries, double gamma)
{
    // Validate caller-supplied pointers and ranges before touching any state.
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_USER_POINTER(entries)) return LIQ_INVALID_POINTER;
    if (gamma < 0 || gamma >= 1.0) return LIQ_VALUE_OUT_OF_RANGE;
    if (num_entries <= 0 || num_entries > 1<<30) return LIQ_VALUE_OUT_OF_RANGE;

    // Explicit color entries can't be combined with already-posterized image data.
    if (input_hist->ignorebits > 0 && input_hist->had_image_added) {
        return LIQ_UNSUPPORTED;
    }
    input_hist->ignorebits = 0;
    input_hist->had_image_added = true;

    input_hist->gamma = gamma ? gamma : 0.45455;

    if (!input_hist->acht) {
        input_hist->acht = pam_allocacolorhash(~0, num_entries*num_entries, 0, options->malloc, options->free);
        if (!input_hist->acht) {
            return LIQ_OUT_OF_MEMORY;
        }
    }
    // Fake image size. It's only for hash size estimates.
if (!input_hist->acht->cols) {
        input_hist->acht->cols = num_entries;
    }
    input_hist->acht->rows += num_entries;

    const unsigned int hash_size = input_hist->acht->hash_size;
    for(int i=0; i < num_entries; i++) {
        const rgba_pixel rgba = {
            .r = entries[i].color.r,
            .g = entries[i].color.g,
            .b = entries[i].color.b,
            .a = entries[i].color.a,
        };
        union rgba_as_int px = {rgba};
        unsigned int hash;
        if (px.rgba.a) {
            hash = px.l % hash_size;
        } else {
            // All fully-transparent colors are collapsed into one bucket (hash 0, value 0).
            hash=0; px.l=0;
        }
        if (!pam_add_to_hash(input_hist->acht, hash, entries[i].count, px, i, num_entries)) {
            return LIQ_OUT_OF_MEMORY;
        }
    }

    return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL liq_error liq_histogram_add_image(liq_histogram *input_hist, const liq_attr *options, liq_image *input_image)
{
    if (!CHECK_STRUCT_TYPE(options, liq_attr)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_hist, liq_histogram)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    const unsigned int cols = input_image->width, rows = input_image->height;

    if (!input_image->importance_map && options->use_contrast_maps) {
        contrast_maps(input_image);
    }

    input_hist->gamma = input_image->gamma;

    // The image's fixed colors become fixed colors of the histogram too.
    for(int i = 0; i < input_image->fixed_colors_count; i++) {
        liq_error res = liq_histogram_add_fixed_color_f(input_hist, input_image->fixed_colors[i]);
        if (res != LIQ_OK) {
            return res;
        }
    }

    /*
     ** Step 2: attempt to make a histogram of the colors, unclustered.
     ** If at first we don't succeed, increase ignorebits to increase color
     ** coherence and try again.
     */

    if (liq_progress(options, options->progress_stage1 * 0.4f)) {
        return LIQ_ABORTED;
    }

    const bool all_rows_at_once = liq_image_can_use_rgba_rows(input_image);

    // Usual solution is to start from scratch when limit is exceeded, but that's not possible if it's not
    // the first image added
    const unsigned int max_histogram_entries = input_hist->had_image_added ?
~0 : options->max_histogram_entries;
    do {
        if (!input_hist->acht) {
            input_hist->acht = pam_allocacolorhash(max_histogram_entries, rows*cols, input_hist->ignorebits, options->malloc, options->free);
        }
        if (!input_hist->acht) return LIQ_OUT_OF_MEMORY;

        // histogram uses noise contrast map for importance. Color accuracy in noisy areas is not very important.
        // noise map does not include edges to avoid ruining anti-aliasing
        for(unsigned int row=0; row < rows; row++) {
            bool added_ok;
            if (all_rows_at_once) {
                // Whole image in one call; the row loop is then exited immediately.
                added_ok = pam_computeacolorhash(input_hist->acht, (const rgba_pixel *const *)input_image->rows, cols, rows, input_image->importance_map);
                if (added_ok) break;
            } else {
                const rgba_pixel* rows_p[1] = { liq_image_get_row_rgba(input_image, row) };
                added_ok = pam_computeacolorhash(input_hist->acht, rows_p, cols, 1, input_image->importance_map ? &input_image->importance_map[row * cols] : NULL);
            }
            if (!added_ok) {
                // Too many distinct colors: posterize harder and rebuild the hash from scratch.
                input_hist->ignorebits++;
                liq_verbose_printf(options, " too many colors! Scaling colors to improve clustering... %d", input_hist->ignorebits);
                pam_freeacolorhash(input_hist->acht);
                input_hist->acht = NULL;
                if (liq_progress(options, options->progress_stage1 * 0.6f)) return LIQ_ABORTED;
                break;
            }
        }
    } while(!input_hist->acht);

    input_hist->had_image_added = true;

    liq_image_free_importance_map(input_image);

    if (input_image->free_pixels && input_image->f_pixels) {
        liq_image_free_rgba_source(input_image); // now can free the RGBA source if copy has been made in f_pixels
    }

    return LIQ_OK;
}

LIQ_NONNULL static liq_error finalize_histogram(liq_histogram *input_hist, liq_attr *options, histogram **hist_output)
{
    if (liq_progress(options, options->progress_stage1 * 0.9f)) {
        return LIQ_ABORTED;
    }

    if (!input_hist->acht) {
        return LIQ_BITMAP_NOT_AVAILABLE;
    }

    // Converts (and consumes) the hash into a flat histogram; the hash is freed either way.
    histogram *hist = pam_acolorhashtoacolorhist(input_hist->acht, input_hist->gamma, options->malloc, options->free);
    pam_freeacolorhash(input_hist->acht);
    input_hist->acht = NULL;

    if (!hist) {
        return LIQ_OUT_OF_MEMORY;
    }
    liq_verbose_printf(options, " made histogram...%d colors found", hist->size);
    remove_fixed_colors_from_histogram(hist, input_hist->fixed_colors_count, input_hist->fixed_colors, options->target_mse);

    *hist_output = hist;
    return LIQ_OK;
}

LIQ_NONNULL static void modify_alpha(liq_image *input_image, rgba_pixel *const row_pixels)
{
    /* IE6 makes colors with even slightest transparency completely transparent,
       thus to improve situation in IE, make colors that are less than ~10% transparent
       completely opaque */

    const float min_opaque_val = input_image->min_opaque_val;
    const float almost_opaque_val = min_opaque_val * 169.f/256.f;
    const unsigned int almost_opaque_val_int = (min_opaque_val * 169.f/256.f)*255.f;

    for(unsigned int col = 0; col < input_image->width; col++) {
        const rgba_pixel px = row_pixels[col];

        /* ie bug: to avoid visible step caused by forced opaqueness, linearily raise opaqueness of almost-opaque colors */
        if (px.a >= almost_opaque_val_int) {
            float al = px.a / 255.f;
            al = almost_opaque_val + (al-almost_opaque_val) *
(1.f-almost_opaque_val) / (min_opaque_val-almost_opaque_val);
            al *= 256.f;
            row_pixels[col].a = al >= 255.f ? 255 : al;
        }
    }
}

/**
 Builds two maps:
    importance_map - approximation of areas with high-frequency noise, except straight edges. 1=flat, 0=noisy.
    edges - noise map including all edges
 */
LIQ_NONNULL static void contrast_maps(liq_image *image)
{
    const unsigned int cols = image->width, rows = image->height;
    // Skip tiny images and images whose 3 temporary byte maps would exceed the memory limit.
    if (cols < 4 || rows < 4 || (3*cols*rows) > LIQ_HIGH_MEMORY_LIMIT) {
        return;
    }

    // Reuse previously-allocated maps when present; ownership is taken away from the image
    // until the maps are rebuilt and reattached at the end.
    unsigned char *restrict noise = image->importance_map ? image->importance_map : image->malloc(cols*rows);
    image->importance_map = NULL;
    unsigned char *restrict edges = image->edges ? image->edges : image->malloc(cols*rows);
    image->edges = NULL;
    unsigned char *restrict tmp = image->malloc(cols*rows);

    if (!noise || !edges || !tmp || !liq_image_get_row_f_init(image)) {
        image->free(noise);
        image->free(edges);
        image->free(tmp);
        return;
    }

    // Sliding 3-row window; edge rows are clamped (prev==curr on the first row, etc.).
    const f_pixel *curr_row, *prev_row, *next_row;
    curr_row = prev_row = next_row = liq_image_get_row_f(image, 0);

    for (unsigned int j=0; j < rows; j++) {
        prev_row = curr_row;
        curr_row = next_row;
        next_row = liq_image_get_row_f(image, MIN(rows-1,j+1));

        f_pixel prev, curr = curr_row[0], next=curr;
        for (unsigned int i=0; i < cols; i++) {
            prev=curr;
            curr=next;
            next = curr_row[MIN(cols-1,i+1)];

            // contrast is difference between pixels neighbouring horizontally and vertically
            const float a = fabsf(prev.a+next.a - curr.a*2.f),
                        r = fabsf(prev.r+next.r - curr.r*2.f),
                        g = fabsf(prev.g+next.g - curr.g*2.f),
                        b = fabsf(prev.b+next.b - curr.b*2.f);

            const f_pixel prevl = prev_row[i];
            const f_pixel nextl = next_row[i];

            const float a1 = fabsf(prevl.a+nextl.a - curr.a*2.f),
                        r1 = fabsf(prevl.r+nextl.r - curr.r*2.f),
                        g1 = fabsf(prevl.g+nextl.g - curr.g*2.f),
                        b1 = fabsf(prevl.b+nextl.b - curr.b*2.f);

            const float horiz = MAX(MAX(a,r),MAX(g,b));
            const float vert = MAX(MAX(a1,r1),MAX(g1,b1));
            const float edge = MAX(horiz,vert);
            float z = edge - fabsf(horiz-vert)*.5f;
            z = 1.f -
MAX(z,MIN(horiz,vert));
            z *= z; // noise is amplified
            z *= z;

            // 85 is about 1/3rd of weight (not 0, because noisy pixels still need to be included, just not as precisely).
            const unsigned int z_int = 85 + (unsigned int)(z * 171.f);
            noise[j*cols+i] = MIN(z_int, 255);
            const int e_int = 255 - (int)(edge * 256.f);
            edges[j*cols+i] = e_int > 0 ? MIN(e_int, 255) : 0;
        }
    }

    // noise areas are shrunk and then expanded to remove thin edges from the map
    liq_max3(noise, tmp, cols, rows);
    liq_max3(tmp, noise, cols, rows);

    liq_blur(noise, tmp, noise, cols, rows, 3);

    liq_max3(noise, tmp, cols, rows);

    liq_min3(tmp, noise, cols, rows);
    liq_min3(noise, tmp, cols, rows);
    liq_min3(tmp, noise, cols, rows);

    liq_min3(edges, tmp, cols, rows);
    liq_max3(tmp, edges, cols, rows);
    // Edge map is capped by the noise map.
    for(unsigned int i=0; i < cols*rows; i++) edges[i] = MIN(noise[i], edges[i]);

    image->free(tmp);

    // Hand ownership of the finished maps back to the image.
    image->importance_map = noise;
    image->edges = edges;
}

/**
 * Builds map of neighbor pixels mapped to the same palette entry
 *
 * For efficiency/simplicity it mainly looks for same consecutive pixels horizontally
 * and peeks 1 pixel above/below. Full 2d algorithm doesn't improve it significantly.
 * Correct flood fill doesn't have visually good properties.
 */
LIQ_NONNULL static void update_dither_map(liq_image *input_image, unsigned char *const *const row_pointers, colormap *map)
{
    const unsigned int width = input_image->width;
    const unsigned int height = input_image->height;
    unsigned char *const edges = input_image->edges;

    for(unsigned int row=0; row < height; row++) {
        unsigned char lastpixel = row_pointers[row][0];
        unsigned int lastcol=0;

        for(unsigned int col=1; col < width; col++) {
            const unsigned char px = row_pointers[row][col];

            if (input_image->background && map->palette[px].acolor.a < 1.f/256.f) {
                // Transparency may or may not create an edge. When there's an explicit background set, assume no edge.
continue;
            }

            // Close a run of identical palette indices (or reach end of row).
            if (px != lastpixel || col == width-1) {
                int neighbor_count = 10 * (col-lastcol);

                // Count pixels above and below the run that share the same palette index.
                unsigned int i=lastcol;
                while(i < col) {
                    if (row > 0) {
                        unsigned char pixelabove = row_pointers[row-1][i];
                        if (pixelabove == lastpixel) neighbor_count += 15;
                    }
                    if (row < height-1) {
                        unsigned char pixelbelow = row_pointers[row+1][i];
                        if (pixelbelow == lastpixel) neighbor_count += 15;
                    }
                    i++;
                }

                // More same-colored neighbors -> stronger dithering weight for the run.
                while(lastcol <= col) {
                    int e = edges[row*width + lastcol];
                    edges[row*width + lastcol++] = (e+128) * (255.f/(255+128)) * (1.f - 20.f / (20 + neighbor_count));
                }
                lastpixel = px;
            }
        }
    }
    // The refined edge map becomes the dither map; the raw edges pointer is given up.
    input_image->dither_map = input_image->edges;
    input_image->edges = NULL;
}

/**
 * Palette can be NULL, in which case it creates a new palette from scratch.
 */
static colormap *add_fixed_colors_to_palette(colormap *palette, const int max_colors, const f_pixel fixed_colors[], const int fixed_colors_count, void* (*malloc)(size_t), void (*free)(void*))
{
    if (!fixed_colors_count) return palette;

    colormap *newpal = pam_colormap(MIN(max_colors, (palette ? palette->colors : 0) + fixed_colors_count), malloc, free);

    unsigned int i=0;
    // Copy as many generated colors as still fit once the fixed colors are reserved.
    if (palette && fixed_colors_count < max_colors) {
        unsigned int palette_max = MIN(palette->colors, max_colors - fixed_colors_count);
        for(; i < palette_max; i++) {
            newpal->palette[i] = palette->palette[i];
        }
    }
    for(int j=0; j < MIN(max_colors, fixed_colors_count); j++) {
        newpal->palette[i++] = (colormap_item){
            .acolor = fixed_colors[j],
            .fixed = true,
        };
    }
    // Takes ownership of the old palette.
    if (palette) pam_freecolormap(palette);
    return newpal;
}

// k-means callback: boost weight of histogram entries that remap poorly (large diff).
LIQ_NONNULL static void adjust_histogram_callback(hist_item *item, float diff)
{
    item->adjusted_weight = (item->perceptual_weight+item->adjusted_weight) * (sqrtf(1.f+diff));
}

/**
 Repeats mediancut with different histogram weights to find palette with minimum error.

 feedback_loop_trials controls how long the search will take. < 0 skips the iteration.
*/
static colormap *find_best_palette(histogram *hist, const liq_attr *options, const double max_mse, const f_pixel fixed_colors[], const unsigned int fixed_colors_count, double *palette_error_p)
{
    unsigned int max_colors = options->max_colors;

    // if output is posterized it doesn't make sense to aim for perfect colors, so increase target_mse
    // at this point actual gamma is not set, so very conservative posterization estimate is used
    const double target_mse = MIN(max_mse, MAX(options->target_mse, pow((1<<options->min_posterization_output)/1024.0, 2)));
    int feedback_loop_trials = options->feedback_loop_trials;
    // Progressively reduce the number of trials for large histograms to bound runtime.
    if (hist->size > 5000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 25000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 50000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    if (hist->size > 100000) {feedback_loop_trials = (feedback_loop_trials*3 + 3)/4;}
    colormap *acolormap = NULL;
    double least_error = MAX_DIFF;
    double target_mse_overshoot = feedback_loop_trials>0 ?
1.05 : 1.0;
    const float total_trials = (float)(feedback_loop_trials>0?feedback_loop_trials:1);

    do {
        colormap *newmap;
        if (hist->size && fixed_colors_count < max_colors) {
            newmap = mediancut(hist, max_colors-fixed_colors_count, target_mse * target_mse_overshoot, MAX(MAX(45.0/65536.0, target_mse), least_error)*1.2,
                options->malloc, options->free);
        } else {
            feedback_loop_trials = 0;
            newmap = NULL;
        }
        // add_fixed_colors_to_palette handles a NULL input palette.
        newmap = add_fixed_colors_to_palette(newmap, max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        if (!newmap) {
            return NULL;
        }

        if (feedback_loop_trials <= 0) {
            return newmap;
        }

        // after palette has been created, total error (MSE) is calculated to keep the best palette
        // at the same time K-Means iteration is done to improve the palette
        // and histogram weights are adjusted based on remapping error to give more weight to poorly matched colors

        const bool first_run_of_target_mse = !acolormap && target_mse > 0;
        double total_error = kmeans_do_iteration(hist, newmap, first_run_of_target_mse ?
NULL : adjust_histogram_callback);

        // goal is to increase quality or to reduce number of colors used if quality is good enough
        if (!acolormap || total_error < least_error || (total_error <= target_mse && newmap->colors < max_colors)) {
            if (acolormap) pam_freecolormap(acolormap);
            acolormap = newmap;

            if (total_error < target_mse && total_error > 0) {
                // K-Means iteration improves quality above what mediancut aims for
                // this compensates for it, making mediancut aim for worse
                target_mse_overshoot = MIN(target_mse_overshoot*1.25, target_mse/total_error);
            }

            least_error = total_error;

            // if number of colors could be reduced, try to keep it that way
            // but allow extra color as a bit of wiggle room in case quality can be improved too
            max_colors = MIN(newmap->colors+1, max_colors);

            feedback_loop_trials -= 1; // asymptotic improvement could make it go on forever
        } else {
            // New palette was worse: soften histogram weights back toward the originals and retry.
            for(unsigned int j=0; j < hist->size; j++) {
                hist->achv[j].adjusted_weight = (hist->achv[j].perceptual_weight + hist->achv[j].adjusted_weight)/2.0;
            }

            target_mse_overshoot = 1.0;
            feedback_loop_trials -= 6;
            // if error is really bad, it's unlikely to improve, so end sooner
            if (total_error > least_error*4) feedback_loop_trials -= 3;
            pam_freecolormap(newmap);
        }

        float fraction_done = 1.f-MAX(0.f, feedback_loop_trials/total_trials);
        if (liq_progress(options, options->progress_stage1 + fraction_done * options->progress_stage2)) break;
        liq_verbose_printf(options, " selecting colors...%d%%", (int)(100.f * fraction_done));
    }
    while(feedback_loop_trials > 0);

    *palette_error_p = least_error;
    return acolormap;
}

// Trivial 1:1 conversion: every histogram entry becomes one palette entry.
static colormap *histogram_to_palette(const histogram *hist, const liq_attr *options)
{
    if (!hist->size) {
        return NULL;
    }
    colormap *acolormap = pam_colormap(hist->size, options->malloc, options->free);
    for(unsigned int i=0; i < hist->size; i++) {
        acolormap->palette[i].acolor = hist->achv[i].acolor;
        acolormap->palette[i].popularity = hist->achv[i].perceptual_weight;
    }
    return acolormap;
}

LIQ_NONNULL static
liq_error pngquant_quantize(histogram *hist, const liq_attr *options,
                            const int fixed_colors_count, const f_pixel fixed_colors[],
                            const double gamma, bool fixed_result_colors, liq_result **result_output)
{
    colormap *acolormap;
    double palette_error = -1; // -1 means "not computed"

    assert((verbose_print(options, "SLOW debug checks enabled. Recompile with NDEBUG for normal operation."),1));

    const bool few_input_colors = hist->size+fixed_colors_count <= options->max_colors;

    if (liq_progress(options, options->progress_stage1)) return LIQ_ABORTED;

    // If image has few colors to begin with (and no quality degradation is required)
    // then it's possible to skip quantization entirely
    if (few_input_colors && options->target_mse == 0) {
        acolormap = add_fixed_colors_to_palette(histogram_to_palette(hist, options), options->max_colors, fixed_colors, fixed_colors_count, options->malloc, options->free);
        palette_error = 0;
    } else {
        const double max_mse = options->max_mse * (few_input_colors ? 0.33 : 1.0); // when degrading image that's already paletted, require much higher improvement, since pal2pal often looks bad and there's little gain
        acolormap = find_best_palette(hist, options, max_mse, fixed_colors, fixed_colors_count, &palette_error);
        if (!acolormap) {
            return LIQ_VALUE_OUT_OF_RANGE;
        }

        // K-Means iteration approaches local minimum for the palette
        double iteration_limit = options->kmeans_iteration_limit;
        unsigned int iterations = options->kmeans_iterations;

        if (!iterations && palette_error < 0 && max_mse < MAX_DIFF) iterations = 1; // otherwise total error is never calculated and MSE limit won't work

        if (iterations) {
            // likely_colormap_index (used and set in kmeans_do_iteration) can't point to index outside colormap
            if (acolormap->colors < 256) for(unsigned int j=0; j < hist->size; j++) {
                if (hist->achv[j].tmp.likely_colormap_index >= acolormap->colors) {
                    hist->achv[j].tmp.likely_colormap_index = 0; // actual value doesn't matter, as the guess is out of date anyway
                }
            }

            // Fewer iterations on large histograms to keep runtime bounded.
            if (hist->size > 5000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 25000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 50000) {iterations = (iterations*3 + 3)/4;}
            if (hist->size > 100000) {iterations = (iterations*3 + 3)/4; iteration_limit *= 2;}

            verbose_print(options, " moving colormap towards local minimum");

            double previous_palette_error = MAX_DIFF;

            for(unsigned int i=0; i < iterations; i++) {
                palette_error = kmeans_do_iteration(hist, acolormap, NULL);

                if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + (i * options->progress_stage3 * 0.9f) / iterations)) {
                    break;
                }

                // Stop when improvement stalls.
                if (fabs(previous_palette_error-palette_error) < iteration_limit) {
                    break;
                }

                if (palette_error > max_mse*1.5) { // probably hopeless
                    if (palette_error > max_mse*3.0) break; // definitely hopeless
                    i++; // skip ahead to end sooner when quality looks unreachable
                }

                previous_palette_error = palette_error;
            }
        }

        if (palette_error > max_mse) {
            liq_verbose_printf(options, " image degradation MSE=%.3f (Q=%d) exceeded limit of %.3f (%d)",
                               mse_to_standard_mse(palette_error), mse_to_quality(palette_error),
                               mse_to_standard_mse(max_mse), mse_to_quality(max_mse));
            pam_freecolormap(acolormap);
            return LIQ_QUALITY_TOO_LOW;
        }
    }

    if (liq_progress(options, options->progress_stage1 + options->progress_stage2 + options->progress_stage3 * 0.95f)) {
        pam_freecolormap(acolormap);
        return LIQ_ABORTED;
    }

    sort_palette(acolormap, options);

    // If palette was created from a multi-image histogram,
    // then it shouldn't be optimized for one image during remapping
    if (fixed_result_colors) {
        for(unsigned int i=0; i < acolormap->colors; i++) {
            acolormap->palette[i].fixed = true;
        }
    }

    liq_result *result = options->malloc(sizeof(liq_result));
    if (!result) return LIQ_OUT_OF_MEMORY;
    *result = (liq_result){
        .magic_header = liq_result_magic,
        .malloc = options->malloc,
        .free = options->free,
        .palette = acolormap,
        .palette_error = palette_error,
        .use_dither_map = options->use_dither_map,
        .gamma = gamma,
        .min_posterization_output = options->min_posterization_output,
    };
    *result_output = result;
return LIQ_OK;
}

LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image(liq_result *result, liq_image *input_image, void *buffer, size_t buffer_size)
{
    if (!CHECK_STRUCT_TYPE(result, liq_result)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) {
        return LIQ_INVALID_POINTER;
    }
    if (!CHECK_USER_POINTER(buffer)) {
        return LIQ_INVALID_POINTER;
    }

    // One output byte (palette index) per pixel.
    const size_t required_size = input_image->width * input_image->height;
    if (buffer_size < required_size) {
        return LIQ_BUFFER_TOO_SMALL;
    }

    // Build row pointers into the caller's flat buffer and delegate to the rows variant.
    LIQ_ARRAY(unsigned char *, rows, input_image->height);
    unsigned char *buffer_bytes = buffer;
    for(unsigned int i=0; i < input_image->height; i++) {
        rows[i] = &buffer_bytes[input_image->width * i];
    }
    return liq_write_remapped_image_rows(result, input_image, rows);
}

LIQ_EXPORT LIQ_NONNULL liq_error liq_write_remapped_image_rows(liq_result *quant, liq_image *input_image, unsigned char **row_pointers)
{
    if (!CHECK_STRUCT_TYPE(quant, liq_result)) return LIQ_INVALID_POINTER;
    if (!CHECK_STRUCT_TYPE(input_image, liq_image)) return LIQ_INVALID_POINTER;
    for(unsigned int i=0; i < input_image->height; i++) {
        if (!CHECK_USER_POINTER(row_pointers+i) || !CHECK_USER_POINTER(row_pointers[i])) return LIQ_INVALID_POINTER;
    }

    // Any previous remapping state for this result is replaced.
    if (quant->remapping) {
        liq_remapping_result_destroy(quant->remapping);
    }

    liq_remapping_result *const result = quant->remapping = liq_remapping_result_create(quant);
    if (!result) return LIQ_OUT_OF_MEMORY;

    if (!input_image->edges && !input_image->dither_map && quant->use_dither_map) {
        contrast_maps(input_image);
    }

    if (liq_remap_progress(result, result->progress_stage1 * 0.25f)) {
        return LIQ_ABORTED;
    }

    /*
     ** Step 4: map the colors in the image to their closest match in the
     ** new colormap, and write 'em out.
     */

    float remapping_error = result->palette_error;
    if (result->dither_level == 0) {
        // No dithering: straight nearest-color remap.
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);
        remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
    } else {
        const bool is_image_huge = (input_image->width * input_image->height) > 2000 * 2000;
        // use_dither_map == 2 forces the dither map even on huge images.
        const bool allow_dither_map = result->use_dither_map == 2 || (!is_image_huge && result->use_dither_map);
        const bool generate_dither_map = allow_dither_map && (input_image->edges && !input_image->dither_map);
        if (generate_dither_map) {
            // If dithering (with dither map) is required, this image is used to find areas that require dithering
            remapping_error = remap_to_palette(input_image, row_pointers, result->palette);
            update_dither_map(input_image, row_pointers, result->palette);
        }

        if (liq_remap_progress(result, result->progress_stage1 * 0.5f)) {
            return LIQ_ABORTED;
        }

        // remapping above was the last chance to do K-Means iteration, hence the final palette is set after remapping
        set_rounded_palette(&result->int_palette, result->palette, result->gamma, quant->min_posterization_output);

        if (!remap_to_palette_floyd(input_image, row_pointers, result, MAX(remapping_error*2.4, 16.f/256.f), generate_dither_map)) {
            return LIQ_ABORTED;
        }
    }

    // remapping error from dithered image is absurd, so always non-dithered value is used
    // palette_error includes some perceptual weighting from histogram which is closer correlated with dssim
    // so that should be used when possible.
    if (result->palette_error < 0) {
        result->palette_error = remapping_error;
    }

    return LIQ_OK;
}

// Library version as a single integer (see LIQ_VERSION in the public header).
LIQ_EXPORT int liq_version() {
    return LIQ_VERSION;
}